Merge "drivers:input:vl53l0x add some calibration interface of vl53l0x"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..4341e3a
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,27 @@
+cc_binary_host {
+ name: "unifdef",
+ srcs: ["scripts/unifdef.c"],
+ sanitize: {
+ never: true,
+ }
+}
+
+gensrcs {
+ name: "qseecom-kernel-includes",
+
+ // move to out/ as root for header generation because of scripts/unifdef
+ // storage - at the expense of extra ../ references
+ cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
+
+ tools: ["unifdef"],
+ tool_files: ["scripts/headers_install.sh"],
+ export_include_dirs: ["include/uapi"],
+ srcs: ["include/uapi/linux/qseecom.h"],
+ output_extension: "h",
+}
+
+cc_library_headers {
+ name: "qseecom-kernel-headers",
+ generated_headers: ["qseecom-kernel-includes"],
+ export_generated_headers: ["qseecom-kernel-includes"],
+}
diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
index e63d09b..de2a963 100644
--- a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
+++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
@@ -29,6 +29,10 @@
- qcom,allocate-boot-time: Indicates whether clients needs boot time memory allocation.
+- qcom,allocate-on-request: Indicates memory allocation happens only when client requests.
+
+/* "qcom,allocate-boot-time" and "qcom,allocate-on-request" are mutually exclusive properties. */
+
Example:
qcom,memshare {
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
new file mode 100644
index 0000000..0304035
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
@@ -0,0 +1,55 @@
+* LPM Workarounds
+
+The required properties are:
+
+- compatible: "qcom,lpm-workarounds"
+
+The optional properties are:
+- reg: The physical address and the size of the l1_l2_gcc and l2_pwr_sts
+	registers of performance cluster.
+
+- reg-names: "l2_pwr_sts" - string to identify l2_pwr_sts physical address.
+ "l1_l2_gcc" - string to identify l1_l2_gcc physical address.
+
+- qcom,lpm-wa-cx-turbo-unvote: Indicates the workaround to unvote CX turbo
+	vote when system is coming out of rpm assisted power collapse.
+ lpm-cx-supply is required if this is present.
+
+- lpm-cx-supply: will hold handle for CX regulator supply which is used
+ to unvote.
+
+- qcom,lpm-wa-skip-l2-spm: Due to a hardware bug on 8939 and 8909, secure
+ world needs to disable and enable L2 SPM to get the proper context
+ in secure watchdog bite cases. With this workaround there is a race
+ in programming L2 SPM between HLOS and secure world. This leads to
+ stability issues. To avoid this program L2 SPM only in secure world
+ based on the L2 mode flag passed. Set lpm-wa-skip-l2-spm node if this
+ is required.
+
+- qcom,lpm-wa-dynamic-clock-gating: Due to a hardware bug on 8952, L1/L2 dynamic
+ clock gating needs to be enabled by software for performance cluster
+ cores and L2. Set lpm-wa-dynamic-clock-gating node if this workaround is
+ required.
+
+- qcom,cpu-offline-mask: Dynamic clock gating should be enabled when cluster is
+ in L2 PC. Each bit of cpu-offline-mask lists the cpu no. to hotplug by KTM
+ driver.
+
+- qcom,non-boot-cpu-index: will hold index of non boot cluster cpu.
+
+- qcom,l1-l2-gcc-secure: indicates L1/L2 clock enabling register is secure.
+
+Example:
+
+qcom,lpm-workarounds {
+ compatible = "qcom,lpm-workarounds";
+ reg = <0x0B011018 0x4>,
+ <0x0B011088 0x4>;
+ reg-names = "l2-pwr-sts", "l1-l2-gcc";
+ lpm-cx-supply = <&pm8916_s2_corner>;
+ qcom,lpm-wa-cx-turbo-unvote;
+ qcom,lpm-wa-skip-l2-spm;
+ qcom,lpm-wa-dynamic-clock-gating;
+ qcom,cpu-offline-mask = "0xF";
+ qcom,non-boot-cpu-index = <4>;
+}
diff --git a/Documentation/devicetree/bindings/arm/msm/pm.txt b/Documentation/devicetree/bindings/arm/msm/pm.txt
new file mode 100644
index 0000000..b66d4a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm.txt
@@ -0,0 +1,49 @@
+* MSM PM
+
+PM is the low power management device for MSM (Snapdragon class) chipsets.
+This device sets up different components to do low power modes and registers with
+the kernel to be notified of idle and suspend states and when called, follows
+through the set of instructions in putting the application cores to the lowest
+power mode possible.
+The PC debug counter reserves 16 registers in the IMEM memory space which maintains
+a count on the state of power collapse on each core. This count will be useful to
+debug the power collapse state on each core.
+
+The required properties for PM are:
+
+- compatible: "qcom,pm"
+
+The optional properties are:
+
+- reg: physical IMEM address reserved for PC counters and the size
+- qcom,use-sync-timer: Indicates whether the target uses the synchronized QTimer.
+- qcom,synced-clocks: Indicates that all cpus running off a single clock source and to
+ instantiate the necessary clock source.
+- qcom,pc-resets-timer: Indicates that the timer gets reset during power collapse.
+- qcom,tz-flushes-cache: Indicates that TZ flushes all of the cache during
+power collapse. MSM PM can decide to not perform cache flush operations to
+reduce latency associated with L2 PC.
+- qcom,saw-turns-off-pll: Indicates that the CPU's PLL can be managed from SAW
+ hardware. On such targets software management of PLL is not required. If
+ this property is specified then qcom,synced-clocks would be ignored.
+- qcom,no-pll-switch-for-retention: Boolean property, to indicate that the cpu
+ clock can be sourced even from the HFPLL even when the cpu is in
+ retention, and need not be switched to an always on pll. If this flag
+ is set then the cpu clock is not ramped down when entering retention or
+ ramped up on exiting retention.
+
+Example 1:
+
+qcom,pm@fe800664 {
+ compatible = "qcom,pm";
+ reg = <0xfe800664 0x40>;
+ qcom,use-sync-timer;
+ };
+
+Example 2:
+
+qcom,pm@fe800664 {
+ compatible = "qcom,pm";
+ reg = <0xfe800664 0x40>;
+ qcom,saw-turns-off-pll;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
new file mode 100644
index 0000000..4f7111f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/pm_snoc_client.txt
@@ -0,0 +1,35 @@
+* MSM PM SNOC client
+
+MSM PM SNOC client device is used to setup a bus request for 100 Mhz for the
+SNOC bus when the Apps cores are active. This bus request helps mitigate the
+exit latency from power collapse in cases where there aren't any active bus
+requests for SNOC.
+
+This device is dependent on the pm-8x60 device, which configures the low power
+mode of respective cores.
+
+The required properties of this device are:
+
+- compatible: qcom,pm-snoc-client
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+                            dual (active & sleep) context
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps
+
+
+Example:
+ qcom,pm-snoc-client {
+ compatible = "qcom,pm-snoc-client";
+ qcom,msm-bus,name = "ocimem_snoc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors =
+ <22 512 0 0>,
+ <22 512 320000 3200000>;
+ };
+
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 24290c8..c17970c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -521,6 +521,9 @@
value 0.
- qcom,mdss-dsi-dma-schedule-line: An integer value indicates the line number after vertical active
region, at which command DMA needs to be triggered.
+- qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to indicate whether the panel supports DSI1 or
+					DSI0 to send commands. If this is set, it means the panel only supports
+					DSI1 to send commands, otherwise DSI0 will send commands.
Required properties for sub-nodes: None
Optional properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cda..fdc3418 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -57,6 +57,10 @@
occupied by the redistributors. Required if more than one such
region is present.
+- ignored-save-restore-irqs: Array of u32 elements, specifying the interrupts
+ which are ignored while doing gicd save/restore. Maximum of 10 elements
+ is supported at present.
+
Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 4839df4..23e5f20 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -87,11 +87,6 @@
When qcom,enable-static-cb is selected, indicates which
iommu context banks may be used by HLOS.
-- qcom,hibernation-support:
- A boolean, indicates that hibernation should be supported and
- all secure usecases should be disabled, since they cannot be
- restored properly.
-
- qcom,skip-init : Disable resetting configuration for all context banks
during device reset. This is useful for targets where
some context banks are dedicated to other execution
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
index 1a76d5d..258504e 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -116,6 +116,11 @@
Definition: Short circuit debounce cycles for internal PWM.
Allowed values: 0, 8, 16 or 32.
+- vcc_pon-supply
+ Usage: optional
+ Value type: <phandle>
+ Definition: PON driver regulator required to force MBG_ON
+
Following properties are specific only to LRA vibrators.
- qcom,lra-auto-mode
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 66eaae1..e1e486f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -71,6 +71,11 @@
Value type: <u32>
Definition: CAM HW Version information.
+- camnoc-axi-min-ib-bw
+ Usage: optional
+ Value type: <u64>
+ Definition: Min camnoc axi bw for the given target.
+
- regulator-names
Usage: required
Value type: <string>
diff --git a/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
new file mode 100644
index 0000000..233c7cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt
@@ -0,0 +1,85 @@
+Fingerprint Cards AB. Fpc1028 driver
+
+The fpc1028 fingerprint sensor is connected to the host processor via SPI.
+The sensor generates interrupts when the user touches the sensor.
+The host controller is expected to read data over SPI and pass the data to
+the rest of the system.
+
+This binding document describes the properties for this module.
+
+Properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: It must be "fpc,fpc1020"
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Peripheral interrupt specifier.
+
+- interrupt-parent
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle of the interrupt controller which services the
+ summary interrupt.
+
+- fpc,gpio_rst
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: GPIO connected to the reset pin of fpc1028
+
+- fpc,gpio_irq
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Specifies the GPIO connected to the irq pin of fpc1028.
+
+- vcc_spi-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: The phandle of the regulator which supplies fpc1028 spi bus core.
+
+- vcc_io-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: The phandle of the regulator which supplies fpc1028 io pins.
+
+- vcc_ana-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: The phandle of the regulator which supplies fpc1028 analog circuit.
+
+- pinctrl-names:
+ Usage: required
+ Value type: <string>
+ Definition: Pinctrl state names for each pin group configuration.
+ eg:"fpc1020_reset_reset", "fpc1020_reset_active", "fpc1020_irq_active".
+ refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+- pinctrl-n:
+ Usage: required
+ Value type: <string>
+ Definition: pinctrl state for each pin group
+ refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
+
+Example:
+
+ fpc1020 {
+ compatible = "fpc,fpc1020";
+ interrupt-parent = <&tlmm>;
+ interrupts = <48 0>;
+ fpc,gpio_rst = <&tlmm 124 0x0>;
+ fpc,gpio_irq = <&tlmm 48 0>;
+ vcc_spi-supply = <&pm8953_l5>;
+                vcc_io-supply = <&pm8953_l5>;
+                vcc_ana-supply = <&pm8953_l5>;
+ fpc,enable-on-boot;
+ pinctrl-names = "fpc1020_reset_reset",
+ "fpc1020_reset_active",
+ "fpc1020_irq_active";
+ pinctrl-0 = <&msm_gpio_124>;
+ pinctrl-1 = <&msm_gpio_124_output_high>;
+ pinctrl-2 = <&msm_gpio_48>;
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 8b63075..917c2d0 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -118,6 +118,10 @@
controller phandle and "clk_ipa_clk" as macro for "iface_clk"
- clock-names: This property shall contain the clock input names used
by driver in same order as the clocks property.This should be "iface_clk"
+- emulator-bar0-offset: Specifies the offset, within PCIe BAR0, where
+ IPA/GSI programmable registers reside. This property is used only
+ with the IPA/GSI emulation system, which is connected to and
+ communicated with via PCIe.
IPA SMMU sub nodes
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
index f6a7a1b..1e44686 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
@@ -109,6 +109,10 @@
this. If this property is not specified,
low battery voltage threshold will be
configured to 4200 mV.
+- qcom,fg-rconn-mohm: Battery connector resistance (Rconn) in
+ milliohms. If Rconn is specified, then
+ Rslow values will be updated to account
+ it for an accurate ESR.
- qcom,cycle-counter-en: Boolean property which enables the cycle
counter feature. If this property is
present, then the following properties
@@ -143,6 +147,14 @@
battery voltage shadow and the current
predicted voltage in uV to initiate
capacity learning.
+- qcom,cl-max-limit-deciperc: The maximum percent that the capacity
+ cannot go above during any capacity
+ learning cycle. This property is in the
+ unit of .1% increments.
+- qcom,cl-min-limit-deciperc: The minimum percent that the capacity
+ cannot go below during any capacity
+ learning cycle. This property is in the
+ unit of .1% increments.
- qcom,capacity-estimation-on: A boolean property to have the fuel
gauge driver attempt to estimate the
battery capacity using battery
@@ -178,6 +190,97 @@
settings will be different from default.
Once SOC crosses 5%, ESR pulse timings
will be restored back to default.
+- qcom,fg-control-slope-limiter: A boolean property to specify if SOC
+ slope limiter coefficients needs to
+ be modified based on charging status
+ and battery temperature threshold.
+- qcom,fg-slope-limit-temp-threshold: Temperature threshold in decidegC used
+ for applying the slope coefficient based
+ on charging status and battery
+ temperature. If this property is not
+ specified, a default value of 100 (10C)
+ will be applied by default.
+- qcom,fg-slope-limit-low-temp-chg: When the temperature goes below the
+ specified temperature threshold and
+ battery is charging, slope coefficient
+ specified with this property will be
+ applied. If this property is not
+ specified, a default value of 45 will be
+ applied.
+- qcom,fg-slope-limit-low-temp-dischg: Same as "qcom,fg-slope-limit-low-temp-chg"
+ except this is when the battery is
+ discharging.
+- qcom,fg-slope-limit-high-temp-chg: When the temperature goes above the
+ specified temperature threshold and
+ battery is charging, slope coefficient
+ specified with this property will be
+ applied. If this property is not
+ specified, a default value of 2 will be
+ applied.
+- qcom,fg-slope-limit-high-temp-dischg: Same as "qcom,fg-slope-limit-high-temp-chg"
+ except this is when the battery is
+ discharging.
+- qcom,fg-dischg-voltage-gain-ctrl: A boolean property to specify if the
+ voltage gain needs to be modified
+ during discharging based on monotonic
+ soc.
+- qcom,fg-dischg-voltage-gain-soc: Array of monotonic SOC threshold values
+ to change the voltage gain settings
+ during discharge. This should be defined
+ in the ascending order and in the range
+ of 0-100. Array limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain settings.
+- qcom,fg-dischg-med-voltage-gain: Array of voltage gain values that needs
+ to be applied to medC voltage gain when
+ the monotonic SOC goes below the SOC
+ threshold specified under
+ qcom,fg-dischg-voltage-gain-soc. Array
+ limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain setting.
+- qcom,fg-dischg-high-voltage-gain: Array of voltage gain values that needs
+ to be applied to highC voltage gain when
+ the monotonic SOC goes below the SOC
+ threshold specified under
+ qcom,fg-dischg-voltage-gain-soc. Array
+ limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain setting.
+- qcom,fg-use-vbat-low-empty-soc: A boolean property to specify whether
+ vbatt-low interrupt is used to handle
+ empty battery condition. If this is
+ not specified, empty battery condition
+ is detected by empty-soc interrupt.
+- qcom,fg-batt-temp-low-limit: Battery temperature (in decidegC) low
+ limit which will be used to validate
+ the battery temperature reading from FG.
+ If the battery temperature goes below
+ this limit, last read good temperature
+ will be notified to userspace. If this
+ limit is not specified, then the
+ default limit would be -60C.
+- qcom,fg-batt-temp-high-limit: Battery temperature (in decidegC) high
+ limit which will be used to validate
+ the battery temperature reading from FG.
+ If the battery temperature goes above
+ this limit, last read good temperature
+ will be notified to userspace. If this
+ limit is not specified, then the
+ default limit would be 150C.
+- qcom,fg-cc-soc-limit-pct: Percentage of CC_SOC before resetting
+ FG and restore the full CC_SOC value.
+- qcom,fg-restore-batt-info: A boolean property to specify whether
+ battery parameters needs to be
+ restored. If this feature is enabled,
+ then validating the battery parameters
+ by OCV/battery SOC, validation range
+ in percentage should be specified via
+ appropriate module parameters to make
+ it work properly.
qcom,fg-soc node required properties:
- reg : offset and length of the PMIC peripheral register map.
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 5256edd..431c32a 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -189,6 +189,8 @@
point to external connector device, which provide "USB-HOST" cable events.
A single phandle may be specified if a single connector device provides
both "USB" and "USB-HOST" events.
+- qcom,phy-id-high-as-peripheral: If present, specifies device to switch to device mode
+ if PHY ID state is high or host mode if PHY ID state is low.
Example HSUSB OTG controller device node :
usb@f9690000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e1be5fd..a22edb5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -103,6 +103,7 @@
fcs Fairchild Semiconductor
firefly Firefly
focaltech FocalTech Systems Co.,Ltd
+fpc Fingerprint Cards AB.
friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd
fsl Freescale Semiconductor
ge General Electric Company
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
index dafd0b8..1428f37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
@@ -18,7 +18,8 @@
&usb {
status = "okay";
- extcon = <&vbus_detect>;
+ qcom,connector-type-uAB;
+ extcon = <0>, <0>, <0>, <&vbus_detect>;
};
&pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
index 1428f37..81877b7 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
@@ -16,12 +16,6 @@
status = "okay";
};
-&usb {
- status = "okay";
- qcom,connector-type-uAB;
- extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
&pcie_ep {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
index 6c5f3c3..d7c0d13 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
@@ -18,7 +18,8 @@
&usb {
status = "okay";
- extcon = <&vbus_detect>;
+ qcom,connector-type-uAB;
+ extcon = <0>, <0>, <0>, <&vbus_detect>;
};
&pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
index d7c0d13..6ceac6e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
@@ -16,12 +16,6 @@
status = "okay";
};
-&usb {
- status = "okay";
- qcom,connector-type-uAB;
- extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
&pcie_ep {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 50bbebf..d1b2050 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -77,7 +77,7 @@
dump_mem: mem_dump_region {
compatible = "shared-dma-pool";
reusable;
- size = <0 0x2400000>;
+ size = <0x400000>;
};
};
@@ -933,24 +933,24 @@
<1 676 0 0>,
<143 777 0 0>,
/* SVS2 */
- <90 512 3616000 7232000>,
+ <90 512 900000 1800000>,
<90 585 300000 600000>,
- <1 676 90000 180000>, /*gcc_config_noc_clk_src */
+ <1 676 90000 179000>, /*gcc_config_noc_clk_src */
<143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/
/* SVS */
- <90 512 6640000 13280000>,
+ <90 512 1530000 3060000>,
<90 585 400000 800000>,
- <1 676 100000 200000>,
+ <1 676 100000 199000>,
<143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/
/* NOMINAL */
- <90 512 10400000 20800000>,
+ <90 512 2592000 5184000>,
<90 585 800000 1600000>,
- <1 676 200000 400000>,
+ <1 676 200000 399000>,
<143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/
/* TURBO */
- <90 512 10400000 20800000>,
+ <90 512 2592000 5184000>,
<90 585 960000 1920000>,
- <1 676 266000 532000>,
+ <1 676 266000 531000>,
<143 777 0 500>; /* IB defined for IPA clk in MHz*/
qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL",
"TURBO";
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 9f5001f..1eaf4ff 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -345,11 +345,13 @@
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_DWC3=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -396,7 +398,6 @@
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index c8087ad..5e6a68b 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -340,11 +340,13 @@
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_DWC3=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -391,7 +393,6 @@
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
-CONFIG_QCOM_PM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 5a56d63..69dc93f 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -411,7 +411,6 @@
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
@@ -442,6 +441,7 @@
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_EVENT_TIMER=y
+CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_BAM_DMUX=y
CONFIG_MSM_GLINK_BGCOM_XPRT=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index af47269..2eb602b 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -425,7 +425,6 @@
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_PM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
@@ -546,8 +545,13 @@
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index b113ebd..b96a08d 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -19,6 +19,8 @@
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -74,7 +76,6 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -306,6 +307,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_TOUCHSCREEN_FT5X06=y
CONFIG_TOUCHSCREEN_GEN_VKEYS=y
CONFIG_INPUT_MISC=y
@@ -354,11 +359,19 @@
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -559,7 +572,6 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
-# CONFIG_MSM_JTAGV8 is not set
CONFIG_MSM_BAM_DMUX=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
@@ -605,7 +617,6 @@
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_SOURCE_ETM4X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
@@ -626,6 +637,7 @@
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 1cccfd3..7ed8509 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -20,6 +20,8 @@
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -77,7 +79,6 @@
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -311,6 +312,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_TOUCHSCREEN_FT5X06=y
CONFIG_TOUCHSCREEN_GEN_VKEYS=y
CONFIG_INPUT_MISC=y
@@ -361,11 +366,19 @@
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -541,6 +554,7 @@
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
@@ -549,6 +563,7 @@
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
@@ -619,6 +634,8 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -660,6 +677,7 @@
CONFIG_MEMTEST=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
@@ -685,6 +703,7 @@
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm/configs/msm8953-batcam-perf_defconfig b/arch/arm/configs/msm8953-batcam-perf_defconfig
index 1610d29..5b886a82 100644
--- a/arch/arm/configs/msm8953-batcam-perf_defconfig
+++ b/arch/arm/configs/msm8953-batcam-perf_defconfig
@@ -40,6 +40,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_SCHED_MC=y
@@ -66,6 +67,7 @@
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_HIBERNATION=y
CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
@@ -75,6 +77,7 @@
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
CONFIG_QSEECOM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@@ -141,11 +144,6 @@
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -177,6 +175,7 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -190,7 +189,6 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
@@ -213,7 +211,6 @@
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm/configs/msm8953-batcam_defconfig b/arch/arm/configs/msm8953-batcam_defconfig
index 1ba9d96..dc6688c 100644
--- a/arch/arm/configs/msm8953-batcam_defconfig
+++ b/arch/arm/configs/msm8953-batcam_defconfig
@@ -39,6 +39,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_MSM8953_BOOT_ORDERING=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_SCHED_MC=y
@@ -65,6 +66,7 @@
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_HIBERNATION=y
CONFIG_HIBERNATION_IMAGE_REUSE=y
+CONFIG_HIBERNATION_SKIP_CRC=y
CONFIG_PM_STD_PARTITION="/dev/mmcblk0p49"
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
@@ -74,6 +76,7 @@
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_DMA_CMA=y
+# CONFIG_OF_KOBJ is not set
CONFIG_QSEECOM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@@ -142,11 +145,6 @@
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -178,6 +176,7 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -191,7 +190,6 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
@@ -214,7 +212,6 @@
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index aa557b0..88e5e22 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -17,6 +17,7 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index f38341d..01da3bf 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -18,6 +18,7 @@
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
@@ -557,6 +558,7 @@
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
@@ -565,6 +567,7 @@
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
@@ -631,6 +634,8 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -672,6 +677,7 @@
CONFIG_MEMTEST=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
+CONFIG_FORCE_PAGES=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 14c2a7c..cab3796 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -32,7 +32,9 @@
CONFIG_CMA=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
@@ -407,9 +409,11 @@
CONFIG_PANIC_TIMEOUT=5
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
@@ -421,6 +425,7 @@
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index b259dc7..d86bc52 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -34,7 +34,9 @@
CONFIG_CMA=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
@@ -416,6 +418,7 @@
CONFIG_PANIC_ON_RECURSIVE_FAULT=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
@@ -430,6 +433,7 @@
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM3X=y
@@ -446,6 +450,7 @@
CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
CONFIG_CRYPTO_CMAC=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a808829..4d1065c 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -242,6 +242,15 @@
writel_relaxed((u32)(val >> 32), addr + 4);
}
+static inline u64 gic_read_irouter(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ val = readl_relaxed(addr);
+ val |= (u64)readl_relaxed(addr + 4) << 32;
+ return val;
+}
+
static inline u64 gic_read_typer(const volatile void __iomem *addr)
{
u64 val;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2d1d821..42d3974 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -150,10 +150,15 @@
}
c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
+ if (!c->irq_set_affinity) {
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
+ } else {
+ int r = irq_set_affinity_locked(d, affinity, false);
+
+ if (r)
+ pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+ d->irq, r);
+ }
return ret;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 5b6cb33..342efa6 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1225,25 +1225,9 @@
static int armv7_probe_pmu(struct arm_pmu *arm_pmu)
{
- int ret;
- struct armv7_pmu_idle_nb *pmu_idle_nb;
-
- pmu_idle_nb = devm_kzalloc(&arm_pmu->plat_device->dev,
- sizeof(*pmu_idle_nb), GFP_KERNEL);
- if (!pmu_idle_nb)
- return -ENOMEM;
-
- ret = smp_call_function_any(&arm_pmu->supported_cpus,
+ return smp_call_function_any(&arm_pmu->supported_cpus,
armv7_read_num_pmnc_events,
&arm_pmu->num_events, 1);
- if (ret)
- return ret;
-
- pmu_idle_nb->cpu_pmu = arm_pmu;
- pmu_idle_nb->perf_cpu_idle_nb.notifier_call = armv7_pmu_idle_notifier;
- idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
-
- return 0;
}
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
@@ -2077,8 +2061,24 @@
static int armv7_pmu_device_probe(struct platform_device *pdev)
{
- return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
+ int ret;
+ struct armv7_pmu_idle_nb *pmu_idle_nb;
+
+ pmu_idle_nb = devm_kzalloc(&pdev->dev, sizeof(*pmu_idle_nb),
+ GFP_KERNEL);
+ if (!pmu_idle_nb)
+ return -ENOMEM;
+
+ ret = arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
armv7_pmu_probe_table);
+ if (ret)
+ return ret;
+
+ pmu_idle_nb->cpu_pmu = (struct arm_pmu *) platform_get_drvdata(pdev);
+ pmu_idle_nb->perf_cpu_idle_nb.notifier_call = armv7_pmu_idle_notifier;
+ idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+ return 0;
}
static struct platform_driver armv7_pmu_driver = {
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index b055b60..da48623 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -94,7 +94,7 @@
select MAY_HAVE_SPARSE_IRQ
select PINCTRL_MSM_TLMM
select USE_PINCTRL_IRQ
- select MSM_PM if PM
+ select MSM_PM_LEGACY if PM
select MSM_RPM_SMD
select MSM_RPM_STATS_LOG
select MSM_RPM_LOG
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
index de4538f..9a82e3a 100644
--- a/arch/arm/mach-qcom/board-msm8953.c
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -37,8 +37,8 @@
/* Explicitly parent the /soc devices to the root node to preserve
* the kernel ABI (sysfs structure, etc) until userspace is updated
*/
- of_platform_populate(of_find_node_by_path("/soc"),
- of_default_bus_match_table, NULL, NULL);
+ return of_platform_populate(of_find_node_by_path("/soc"),
+ of_default_bus_match_table, NULL, NULL);
}
late_initcall(msm8953_dt_populate);
#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b57aafc..f216025 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2372,6 +2372,7 @@
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
mapping->ops = &iommu_ops;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 98238d9..4db459a 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -404,10 +404,10 @@
apq8053-iot-mtp.dtb \
apq8053-lite-dragon-v1.0.dtb \
apq8053-lite-dragon-v2.0.dtb \
- apq8053-lite-lenovo-v1.0.dtb \
- apq8053-lite-lenovo-v1.1.dtb \
- apq8053-lite-harman-v1.0.dtb \
- apq8053-lite-lge-v1.0.dtb \
+ apq8053-lite-dragon-v2.1.dtb \
+ apq8053-lite-dragon-v2.2.dtb \
+ apq8053-lite-dragon-v2.3.dtb \
+ apq8053-lite-dragon-v2.4.dtb \
msm8953-pmi8940-cdp.dtb \
msm8953-pmi8940-mtp.dtb \
msm8953-pmi8937-cdp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 958f7c8..1314129 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -164,7 +164,7 @@
};
&external_image_mem {
- reg = <0x0 0x87a00000 0x0 0x0600000>;
+ reg = <0x0 0x87900000 0x0 0x0700000>;
};
&modem_adsp_mem {
@@ -172,7 +172,7 @@
};
&peripheral_mem {
- reg = <0x0 0x89e00000 0x0 0x0700000>;
+ status = "disabled";
};
&pm8916_chg {
@@ -186,3 +186,12 @@
&blsp1_uart2_hs {
status = "ok";
};
+
+&i2c_1 {
+ status = "okay";
+ vl53l0x@52 {
+ compatible = "st,stmvl53l0";
+ reg = <0x29>;
+ status = "ok";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
index 1fe7b15..20878c0 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-alpha.dts
@@ -52,6 +52,7 @@
qcom,blackghost {
compatible = "qcom,pil-blackghost";
+ qcom,pil-force-shutdown;
qcom,firmware-name = "bg-wear";
/* GPIO inputs from blackghost */
qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -157,8 +158,11 @@
interrupts = <50 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active","nfc_suspend";
- pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+ pinctrl-0 = <&nfcw_int_active
+ &nfcw_disable_active
+ &nfc_clk_default>;
pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+ clocks = <&clock_rpm clk_bb_clk3_pin>;
clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
index 8113670..e7af39f 100644
--- a/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009w-bg-wtp-v2.dts
@@ -71,6 +71,7 @@
qcom,blackghost {
compatible = "qcom,pil-blackghost";
+ qcom,pil-force-shutdown;
qcom,firmware-name = "bg-wear";
/* GPIO inputs from blackghost */
qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -176,8 +177,11 @@
interrupts = <50 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active","nfc_suspend";
- pinctrl-0 = <&nfcw_int_active &nfcw_disable_active>;
+ pinctrl-0 = <&nfcw_int_active
+ &nfcw_disable_active
+ &nfc_clk_default>;
pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+ clocks = <&clock_rpm clk_bb_clk3_pin>;
clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
index 325accf..6c9c266 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
@@ -13,10 +13,10 @@
/dts-v1/;
-#include "apq8053-lite-lenovo-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.1.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.0 Board";
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.1";
compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
"qcom,dragonboard";
qcom,board-id= <0x01010020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
index 0c7b557..ecc4fea 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
@@ -13,10 +13,10 @@
/dts-v1/;
-#include "apq8053-lite-lenovo-v1.1.dtsi"
+#include "apq8053-lite-dragon-v2.2.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. APQ8053 Lite Lenovo v1.1 Board";
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.2";
compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
"qcom,dragonboard";
qcom,board-id= <0x01010120 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lenovo-v1.1.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
index 203b6b8..e3f80be 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
@@ -13,10 +13,10 @@
/dts-v1/;
-#include "apq8053-lite-harman-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.3.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. APQ8053 Lite Harman v1.0 Board";
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.3";
compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
"qcom,dragonboard";
qcom,board-id= <0x01020020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-harman-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
similarity index 86%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
index 70952dc..1f40ef8 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
@@ -13,10 +13,10 @@
/dts-v1/;
-#include "apq8053-lite-lge-v1.0.dtsi"
+#include "apq8053-lite-dragon-v2.4.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. APQ8053 Lite LGE v1.0 Board";
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.4";
compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
"qcom,dragonboard";
qcom,board-id= <0x01030020 0>;
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
similarity index 100%
rename from arch/arm64/boot/dts/qcom/apq8053-lite-lge-v1.0.dtsi
rename to arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
index b4ac287..06fc5a4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
@@ -70,6 +70,10 @@
29 01 00 00 00 00 05 2a 00 04 01 89
/* Reset row start address */
29 01 00 00 00 00 05 2b 00 00 01 85
+ 15 01 00 00 00 00 02 fe 01
+ 15 01 00 00 00 00 02 04 00
+ 15 01 00 00 00 00 02 fe 00
+ 15 01 00 00 00 00 02 3a 77
];
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-lane-map = "lane_map_0123";
diff --git a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
index dc95570..180d6c3 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
@@ -19,26 +19,15 @@
/* To use BIMC based bus governor */
gpubw: qcom,gpubw {
compatible = "qcom,devbw";
- governor = "bw_hwmon";
+ governor = "bw_vbif";
qcom,src-dst-ports = <26 512>;
qcom,bw-tbl =
< 0 >, /* 9.6 MHz */
- < 381 >, /* 50.0 MHz */
- < 762 >, /* 100.0 MHz */
< 1525 >, /* 200.0 MHz */
< 3051 >, /* 400.0 MHz */
< 4066 >; /* 533.0 MHz */
};
- qcom,gpu-bwmon@410000 {
- compatible = "qcom,bimc-bwmon2";
- reg = <0x00410000 0x300>, <0x00401000 0x200>;
- reg-names = "base", "global_base";
- interrupts = <0 183 4>;
- qcom,mport = <2>;
- qcom,target-dev = <&gpubw>;
- };
-
msm_gpu: qcom,kgsl-3d0@01c00000 {
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
@@ -103,24 +92,32 @@
reg = <0>;
qcom,gpu-freq = <456000000>;
qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <3>;
};
qcom,gpu-pwrlevel@1 {
reg = <1>;
qcom,gpu-freq = <307200000>;
qcom,bus-freq = <2>;
+ qcom,bus-min = <2>;
+ qcom,bus-max = <3>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
qcom,gpu-freq = <200000000>;
- qcom,bus-freq = <1>;
+ qcom,bus-freq = <2>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <2>;
};
qcom,gpu-pwrlevel@3 {
reg = <3>;
qcom,gpu-freq = <19200000>;
qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
index 9dd80f0..6f61dd4 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
+++ b/arch/arm64/boot/dts/qcom/msm8909w-bg-wtp-v2.dts
@@ -71,6 +71,7 @@
qcom,blackghost {
compatible = "qcom,pil-blackghost";
+ qcom,pil-force-shutdown;
qcom,firmware-name = "bg-wear";
/* GPIO inputs from blackghost */
qcom,bg2ap-status-gpio = <&msm_gpio 97 0>;
@@ -221,6 +222,7 @@
&nfcw_disable_active
&nfc_clk_default>;
pinctrl-1 = <&nfcw_int_suspend &nfcw_disable_suspend>;
+ clocks = <&clock_rpm clk_bb_clk3_pin>;
clock-names = "ref_clk";
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
index b9229e1..26fb25c 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
@@ -15,10 +15,11 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8917-pinctrl";
reg = <0x1000000 0x300000>;
- interrupts = <0 208 0>;
+ interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ interrupt-parent = <&wakegpio>;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
index 575d1b5..a3b4679 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
@@ -31,7 +31,6 @@
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
- qcom,use-psci;
#address-cells = <1>;
#size-cells = <0>;
@@ -40,8 +39,6 @@
#address-cells = <1>;
#size-cells = <0>;
label = "perf";
- qcom,spm-device-names = "l2";
- qcom,default-level=<0>;
qcom,psci-mode-shift = <4>;
qcom,psci-mode-mask = <0xf>;
@@ -98,11 +95,12 @@
#size-cells = <0>;
qcom,psci-mode-shift = <0>;
qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cpu-level@0 {
reg = <0>;
- qcom,psci-cpu-mode = <0>;
label = "wfi";
+ qcom,psci-cpu-mode = <1>;
qcom,latency-us = <12>;
qcom,ss-power = <463>;
qcom,energy-overhead = <23520>;
@@ -111,8 +109,8 @@
qcom,pm-cpu-level@1 {
reg = <1>;
- qcom,psci-cpu-mode = <3>;
label = "pc";
+ qcom,psci-cpu-mode = <3>;
qcom,latency-us = <180>;
qcom,ss-power = <429>;
qcom,energy-overhead = <162991>;
@@ -125,204 +123,10 @@
};
};
- qcom,mpm@601d0 {
- compatible = "qcom,mpm-v2";
- reg = <0x601d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
- <0xb011008 0x4>;
- reg-names = "vmpm", "ipc";
- interrupts = <0 171 1>;
- clocks = <&clock_gcc clk_xo_lpm_clk>;
- clock-names = "xo";
- qcom,ipc-bit-offset = <1>;
- qcom,gic-parent = <&intc>;
- qcom,gic-map = <2 216>, /* tsens_upper_lower_int */
- <49 172>, /* usb1_hs_async_wakeup_irq */
- <58 166>, /* usb_hs_irq */
- <53 104>, /* mdss_irq */
- <62 222>, /* ee0_krait_hlos_spmi_periph_irq */
- <0xff 18>, /* APC_qgicQTmrSecPhysIrptReq */
- <0xff 19>, /* APC_qgicQTmrNonSecPhysIrptReq */
- <0xff 20>, /* qgicQTmrVirtIrptReq */
- <0xff 35>, /* WDT_barkInt */
- <0xff 39>, /* arch_mem_timer */
- <0xff 40>, /* qtmr_phy_irq[0] */
- <0xff 47>, /* rbif_irq[0] */
- <0xff 56>, /* q6_wdog_expired_irq */
- <0xff 57>, /* mss_to_apps_irq(0) */
- <0xff 58>, /* mss_to_apps_irq(1) */
- <0xff 59>, /* mss_to_apps_irq(2) */
- <0xff 60>, /* mss_to_apps_irq(3) */
- <0xff 61>, /* mss_a2_bam_irq */
- <0xff 65>, /* o_gc_sys_irq[0] */
- <0xff 69>, /* vbif_irpt */
- <0xff 73>, /* smmu_intr_bus[1] */
- <0xff 74>, /* smmu_bus_intr[2] */
- <0xff 75>, /* smmu_bus_intr[3] */
- <0xff 76>, /* venus_irq */
- <0xff 78>, /* smmu_bus_intr[5] */
- <0xff 79>, /* smmu_bus_intr[6] */
- <0xff 85>, /* smmu_bus_intr[31] */
- <0xff 86>, /* smmu_bus_intr[32] */
- <0xff 90>, /* smmu_bus_intr[33] */
- <0xff 92>, /* smmu_bus_intr[34] */
- <0xff 93>, /* smmu_bus_intr[35] */
- <0xff 97>, /* smmu_bus_intr[10] */
- <0xff 102>, /* smmu_bus_intr[14] */
- <0xff 108>, /* smmu_bus_intr[36] */
- <0xff 109>, /* smmu_bus_intr[37] */
- <0xff 112>, /* smmu_bus_intr[38] */
- <0xff 114>, /* qdsd_intr_out */
- <0xff 126>, /* smmu_bus_intr[39] */
- <0xff 128>, /* blsp1_peripheral_irq[3] */
- <0xff 129>, /* blsp1_peripheral_irq[4] */
- <0xff 131>, /* qup_irq */
- <0xff 136>, /* smmu_bus_intr[43] */
- <0xff 137>, /* smmu_intr_bus[44] */
- <0xff 138>, /* smmu_intr_bus[45] */
- <0xff 140>, /* uart_dm_intr */
- <0xff 141>, /* smmu_bus_intr[46] */
- <0xff 142>, /* smmu_bus_intr[47] */
- <0xff 143>, /* smmu_bus_intr[48] */
- <0xff 144>, /* smmu_bus_intr[49] */
- <0xff 145>, /* smmu_bus_intr[50] */
- <0xff 146>, /* smmu_bus_intr[51] */
- <0xff 147>, /* smmu_bus_intr[52] */
- <0xff 148>, /* smmu_bus_intr[53] */
- <0xff 149>, /* smmu_bus_intr[54] */
- <0xff 150>, /* smmu_bus_intr[55] */
- <0xff 151>, /* smmu_bus_intr[56] */
- <0xff 152>, /* smmu_bus_intr[57] */
- <0xff 153>, /* smmu_bus_intr[58] */
- <0xff 155>, /* sdc1_irq(0) */
- <0xff 157>, /* sdc2_irq(0) */
- <0xff 167>, /* bam_irq(0) */
- <0xff 170>, /* sdc1_pwr_cmd_irq */
- <0xff 173>, /* o_wcss_apss_smd_hi */
- <0xff 174>, /* o_wcss_apss_smd_med */
- <0xff 175>, /* o_wcss_apss_smd_low */
- <0xff 176>, /* o_wcss_apss_smsm_irq */
- <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
- <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
- <0xff 179>, /* o_wcss_apss_asic_intr */
- <0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
- <0xff 188>, /* lpass_irq_out_apcs(0) */
- <0xff 189>, /* lpass_irq_out_apcs(1) */
- <0xff 190>, /* lpass_irq_out_apcs(2) */
- <0xff 191>, /* lpass_irq_out_apcs(3) */
- <0xff 192>, /* lpass_irq_out_apcs(4) */
- <0xff 193>, /* lpass_irq_out_apcs(5) */
- <0xff 194>, /* lpass_irq_out_apcs(6) */
- <0xff 195>, /* lpass_irq_out_apcs(7) */
- <0xff 196>, /* lpass_irq_out_apcs(8) */
- <0xff 197>, /* lpass_irq_out_apcs(9) */
- <0xff 198>, /* coresight-tmc-etr interrupt */
- <0xff 200>, /* rpm_ipc(4) */
- <0xff 201>, /* rpm_ipc(5) */
- <0xff 202>, /* rpm_ipc(6) */
- <0xff 203>, /* rpm_ipc(7) */
- <0xff 204>, /* rpm_ipc(24) */
- <0xff 205>, /* rpm_ipc(25) */
- <0xff 206>, /* rpm_ipc(26) */
- <0xff 207>, /* rpm_ipc(27) */
- <0xff 215>, /* o_bimc_intr[0] */
- <0xff 224>, /* SPDM interrupt */
- <0xff 239>, /* crypto_bam_irq[1]*/
- <0xff 240>, /* summary_irq_kpss */
- <0xff 253>, /* sdcc_pwr_cmd_irq */
- <0xff 260>, /* ipa_irq[0] */
- <0xff 261>, /* ipa_irq[2] */
- <0xff 262>, /* ipa_bam_irq[0] */
- <0xff 263>, /* ipa_bam_irq[2] */
- <0xff 269>, /* rpm_wdog_expired_irq */
- <0xff 270>, /* blsp1_bam_irq[0] */
- <0xff 272>, /* smmu_intr_bus[17] */
- <0xff 273>, /* smmu_bus_intr[18] */
- <0xff 274>, /* smmu_bus_intr[19] */
- <0xff 275>, /* rpm_ipc(30) */
- <0xff 276>, /* rpm_ipc(31) */
- <0xff 277>, /* smmu_intr_bus[20] */
- <0xff 285>, /* smmu_bus_intr[28] */
- <0xff 286>, /* smmu_bus_intr[29] */
- <0xff 287>, /* smmu_bus_intr[30] */
- <0xff 321>, /* q6ss_irq_out(4) */
- <0xff 322>, /* q6ss_irq_out(5) */
- <0xff 323>, /* q6ss_irq_out(6) */
- <0xff 325>, /* q6ss_wdog_exp_irq */
- <0xff 344>; /* sdcc1ice */
-
- qcom,gpio-parent = <&tlmm>;
- qcom,gpio-map = <3 38 >,
- <4 1 >,
- <5 5 >,
- <6 9 >,
- <8 37>,
- <9 36>,
- <10 13>,
- <11 35>,
- <12 17>,
- <13 21>,
- <14 54>,
- <15 34>,
- <16 31>,
- <17 58>,
- <18 28>,
- <19 42>,
- <20 25>,
- <21 12>,
- <22 43>,
- <23 44>,
- <24 45>,
- <25 46>,
- <26 48>,
- <27 65>,
- <28 93>,
- <29 97>,
- <30 63>,
- <31 70>,
- <32 71>,
- <33 72>,
- <34 81>,
- <35 126>,
- <36 90>,
- <37 128>,
- <38 91>,
- <39 41>,
- <40 127>,
- <41 86>,
- <50 67>,
- <51 73>,
- <52 74>,
- <53 62>,
- <54 124>,
- <55 61>,
- <56 130>,
- <57 59>,
- <59 50>;
- };
-
- qcom,cpu-sleep-status {
- compatible = "qcom,cpu-sleep-status";
- };
-
- qcom,rpm-log@29dc00 {
- compatible = "qcom,rpm-log";
- reg = <0x29dc00 0x4000>;
- qcom,rpm-addr-phys = <0x200000>;
- qcom,offset-version = <4>;
- qcom,offset-page-buffer-addr = <36>;
- qcom,offset-log-len = <40>;
- qcom,offset-log-len-mask = <44>;
- qcom,offset-page-indices = <56>;
- };
-
qcom,rpm-stats@29dba0 {
compatible = "qcom,rpm-stats";
- reg = <0x200000 0x1000>,
- <0x290014 0x4>,
- <0x29001c 0x4>;
- reg-names = "phys_addr_base", "offset_addr",
- "heap_phys_addrbase";
- qcom,sleep-stats-version = <2>;
+ reg = <0x200000 0x1000>, <0x290014 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
};
qcom,rpm-master-stats@60150 {
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index a285110..d080539 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -21,7 +21,7 @@
model = "Qualcomm Technologies, Inc. MSM8917";
compatible = "qcom,msm8917";
qcom,msm-id = <303 0x0>, <308 0x0>, <309 0x0>;
- interrupt-parent = <&intc>;
+ interrupt-parent = <&wakegic>;
chosen {
bootargs = "sched_enable_hmp=1";
@@ -197,19 +197,19 @@
};
wakegic: wake-gic {
- compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8937";
+ compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic";
interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
reg = <0x601d0 0x1000>,
<0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */
reg-names = "vmpm", "ipc";
- qcom,num-mpm-irqs = <96>;
+ qcom,num-mpm-irqs = <64>;
interrupt-controller;
interrupt-parent = <&intc>;
#interrupt-cells = <3>;
};
wakegpio: wake-gpio {
- compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8937";
+ compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio";
interrupt-controller;
interrupt-parent = <&intc>;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
index 57823b8..90685e9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
@@ -156,6 +156,39 @@
};
};
+&pm8937_gpios {
+ nfc_clk {
+ nfc_clk_default: nfc_clk_default {
+ pins = "gpio5";
+ function = "normal";
+ input-enable;
+ power-source = <1>;
+ };
+ };
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 17 0x00>;
+ qcom,nq-ven = <&tlmm 16 0x00>;
+ qcom,nq-firm = <&tlmm 130 0x00>;
+ qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK2";
+ interrupts = <17 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_disable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+ clocks = <&clock_gcc clk_bb_clk2_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&thermal_zones {
quiet-therm-step {
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-cdp.dtsi
@@ -207,8 +207,8 @@
cam_v_custom1-supply = <&pm8953_l23>;
qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
"cam_vana", "cam_v_custom1";
- qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
- qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+ qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+ qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
index 6e961b1..d47dd75 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
@@ -207,8 +207,8 @@
cam_v_custom1-supply = <&pm8953_l23>;
qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
"cam_vana", "cam_v_custom1";
- qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1220000>;
- qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1220000>;
+ qcom,cam-vreg-min-voltage = <0 1200000 2850000 2800000 1200000>;
+ qcom,cam-vreg-max-voltage = <0 1200000 2850000 2800000 1200000>;
qcom,cam-vreg-op-mode = <0 105000 100000 80000 105000>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_default
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
index f7671dc..7ee30f6 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mdss-panels.dtsi
@@ -75,7 +75,13 @@
23 1e 08 09 05 03 04 a0
23 1a 08 09 05 03 04 a0];
qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x1c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
@@ -86,7 +92,13 @@
23 1e 08 09 05 03 04 a0
23 1a 08 09 05 03 04 a0];
qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x1c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x1c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
&dsi_r69006_1080p_video {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 97c6db3..539ac59 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -37,6 +37,13 @@
qcom,battery-data = <&mtp_batterydata>;
};
+&pmi_haptic{
+ qcom,lra-auto-res-mode="qwd";
+ qcom,lra-high-z="opt1";
+ qcom,lra-res-cal-period = <0>;
+ qcom,wave-play-rate-us = <4165>;
+};
+
&qpnp_smbcharger {
qcom,battery-data = <&mtp_batterydata>;
qcom,chg-led-sw-controls;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
index cb8cdf2..1558010 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-vidc.dtsi
@@ -88,8 +88,8 @@
<&apps_iommu 0x82c 0x01>,
<&apps_iommu 0x821 0x10>;
buffer-types = <0xfff>;
- virtual-addr-pool = <0x5dc00000 0x7f000000
- 0xdcc00000 0x1000000>;
+ virtual-addr-pool = <0x79000000 0x28000000
+ 0xa1000000 0xc9000000>;
};
secure_bitstream_cb {
@@ -102,7 +102,7 @@
<&apps_iommu 0x926 0x0>,
<&apps_iommu 0x929 0x2>;
buffer-types = <0x241>;
- virtual-addr-pool = <0x4b000000 0x12c00000>;
+ virtual-addr-pool = <0x51000000 0x28000000>;
qcom,secure-context-bank;
};
@@ -113,7 +113,7 @@
<&apps_iommu 0x910 0x0>,
<&apps_iommu 0x92c 0x0>;
buffer-types = <0x106>;
- virtual-addr-pool = <0x25800000 0x25800000>;
+ virtual-addr-pool = <0x29000000 0x28000000>;
qcom,secure-context-bank;
};
@@ -125,7 +125,7 @@
<&apps_iommu 0x925 0x8>,
<&apps_iommu 0x928 0x0>;
buffer-types = <0x480>;
- virtual-addr-pool = <0x1000000 0x24800000>;
+ virtual-addr-pool = <0x1000000 0x28000000>;
qcom,secure-context-bank;
};
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 0b1a50a..65390cb 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -315,6 +315,7 @@
dpdm-supply = <&qusb_phy>;
qcom,auto-recharge-soc = <98>;
qcom,chg-vadc = <&pmi632_vadc>;
+ qcom,flash-disable-soc = <10>;
qcom,thermal-mitigation
= <3000000 2500000 2000000 1500000
@@ -464,7 +465,6 @@
"ilim1-s1",
"ilim2-s2",
"vreg-ok";
- qcom,flash-disable-soc = <10>;
};
smb5_vbus: qcom,smb5-vbus {
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 8797ea8..fa93918 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -587,13 +587,15 @@
};
};
- pmi_haptic: qcom,haptic@c000 {
+ pmi_haptic: qcom,haptics@c000 {
compatible = "qcom,qpnp-haptics";
reg = <0xc000 0x100>;
interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
<0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hap-sc-irq", "hap-play-irq";
qcom,pmic-revid = <&pmi8950_revid>;
+ vcc_pon-supply = <&pon_perph_reg>;
+ qcom,int-pwm-freq-khz = <505>;
qcom,play-mode = "direct";
qcom,wave-play-rate-us = <5263>;
qcom,actuator-type = <0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 747593f..16ae8de 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -108,4 +108,276 @@
&msm_gpu {
/delete-node/qcom,gpu-mempools;
+ /delete-node/qcom,gpu-pwrlevel-bins;
+
+ qcom,gpu-pwrlevel-bins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible="qcom,gpu-pwrlevel-bins";
+
+ qcom,gpu-pwrlevels-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <0>;
+
+ qcom,initial-pwrlevel = <4>;
+ qcom,ca-target-pwrlevel = <5>;
+
+ /* TURBO_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <780000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <11>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <146>;
+
+ qcom,initial-pwrlevel = <3>;
+ qcom,ca-target-pwrlevel = <4>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <163>;
+
+ qcom,initial-pwrlevel = <4>;
+ qcom,ca-target-pwrlevel = <5>;
+
+ /* TURBO_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <780000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <11>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <7>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
index c907977..f20c2ba 100644
--- a/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sda450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
/dts-v1/;
#include "sda450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
#include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
index c76ef2b..19d1370 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr-pinctrl-overlay.dtsi
@@ -10,7 +10,7 @@
* GNU General Public License for more details.
*/
-&cam_sensor_mclk0_active{
+&cam_sensor_mclk0_active {
/* MCLK0 */
mux {
pins = "gpio13";
@@ -20,7 +20,7 @@
config {
pins = "gpio13";
bias-disable; /* No PULL */
- drive-strength = <8>; /* 2 MA */
+ drive-strength = <8>; /* 8 MA */
};
};
@@ -34,19 +34,75 @@
config {
pins = "gpio13";
bias-pull-down; /* PULL DOWN */
- drive-strength = <8>; /* 2 MA */
+ drive-strength = <8>; /* 8 MA */
+ };
+};
+
+&cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+};
+
+&cam_sensor_mclk1_suspend {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <8>; /* 8 MA */
+ };
+};
+
+&cam_sensor_mclk2_active {
+ /* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+};
+
+&cam_sensor_mclk2_suspend {
+ /* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <8>; /* 8 MA */
};
};
&cam_sensor_rear_active {
/* RESET, AVDD LDO */
mux {
- pins = "gpio8","gpio79";
+ pins = "gpio8", "gpio29";
function = "gpio";
};
config {
- pins = "gpio8","gpio79";
+ pins = "gpio8", "gpio29";
bias-disable; /* No PULL */
drive-strength = <2>; /* 2 MA */
};
@@ -55,43 +111,115 @@
&cam_sensor_rear_suspend {
/* RESET, AVDD LDO */
mux {
- pins = "gpio8","gpio79";
+ pins = "gpio8", "gpio29";
function = "gpio";
};
config {
- pins = "gpio8","gpio79";
+ pins = "gpio8", "gpio29";
bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
output-low;
};
};
-&cam_sensor_front_active{
- /* RESET AVDD_LDO*/
+&cam_sensor_front_active {
+ /* RESET AVDD_LDO */
mux {
- pins = "gpio26", "gpio8";
+ pins = "gpio26", "gpio12";
function = "gpio";
};
config {
- pins = "gpio26", "gpio8";
+ pins = "gpio26", "gpio12";
bias-disable; /* No PULL */
drive-strength = <2>; /* 2 MA */
};
};
-&cam_sensor_front_suspend{
+&cam_sensor_front_suspend {
/* RESET */
mux {
- pins = "gpio26", "gpio8";
+ pins = "gpio26", "gpio12";
function = "gpio";
};
config {
- pins = "gpio26", "gpio8";
+ pins = "gpio26", "gpio12";
bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
output-low;
};
};
+
+&cam_sensor_iris_active {
+ /* RESET AVDD_LDO */
+ mux {
+ pins = "gpio21", "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21", "gpio122";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+};
+
+&cam_sensor_iris_suspend {
+ /* RESET AVDD_LDO */
+ mux {
+ pins = "gpio21", "gpio122";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21", "gpio122";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ output-low;
+ };
+};
+
+&cam_sensor_rear_vana {
+ /* AVDD_LDO */
+ mux {
+ pins = "gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio7";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+};
+
+&cam_res_mgr_active {
+ /* AVDD_LDO */
+ mux {
+ pins = "gpio79";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio79";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+};
+
+&cam_res_mgr_suspend {
+ /* AVDD_LDO */
+ mux {
+ pins = "gpio79";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio79";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ output-low;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
index ce62781..1062ca6 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
@@ -14,9 +14,12 @@
#include "sdm845-pinctrl-overlay.dtsi"
#include "sda845-svr-pinctrl-overlay.dtsi"
#include "sdm845-camera-sensor-svr.dtsi"
-#include "smb1355.dtsi"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
&vendor {
bluetooth: bt_wcn3990 {
compatible = "qca,wcn3990";
@@ -249,6 +252,24 @@
#cooling-cells = <2>;
};
+&pm8998_l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+};
+
+&pm8998_l15 {
+ regulator-min-microvolt = <1504000>;
+ regulator-max-microvolt = <1504000>;
+ qcom,init-voltage = <1504000>;
+};
+
+&pm8998_l16 {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3312000>;
+ qcom,init-voltage = <3312000>;
+};
+
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
diff --git a/arch/arm64/boot/dts/qcom/sdm429.dtsi b/arch/arm64/boot/dts/qcom/sdm429.dtsi
index 19df054..b52ee2d 100644
--- a/arch/arm64/boot/dts/qcom/sdm429.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429.dtsi
@@ -146,6 +146,16 @@
};
&soc {
+ devfreq_spdm_cpu {
+ status = "disabled";
+ };
+
+ devfreq_spdm_gov {
+ status = "disabled";
+ };
+};
+
+&soc {
/delete-node/ qcom,cpu-clock-8939@b111050;
clock_cpu: qcom,cpu-clock-8939@b111050 {
compatible = "qcom,cpu-clock-sdm429";
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
index fd66f4b..6ecd0dd 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
@@ -239,6 +239,19 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
};
&dsi_nt35695b_truly_fhd_cmd {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
index 29e0d72..0336d82 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
@@ -214,4 +214,83 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+ smb_en {
+ smb_en_default: smb_en_default {
+ pins = "gpio2";
+ function = "func1";
+ output-enable;
+ };
+ };
+
+ pmi632_sense {
+ /* GPIO 7 and 8 are external-sense pins for PMI632 */
+ pmi632_sense_default: pmi632_sense_default {
+ pins = "gpio7", "gpio8";
+ bias-high-impedance; /* disable the GPIO */
+ bias-disable; /* no-pull */
+ };
+ };
+};
+
+&tlmm {
+ smb_int_default: smb_int_default {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&smb1355_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_0: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
+
+&smb1355_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_1: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
index d34c34a..615489e 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pm8953.dtsi
@@ -271,6 +271,21 @@
};
};
};
+
+ pa-therm0 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8953_adc_tm 0x36>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ };
};
&pm8953_vadc {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
index bc2ba9f..2bfab8f 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-pmi632.dtsi
@@ -43,6 +43,27 @@
qcom,battery-data = <&mtp_batterydata>;
};
+&pmi632_vadc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&quiet_therm_default &smb_therm_default>;
+};
+
+&pmi632_gpios {
+ quiet_therm {
+ quiet_therm_default: quiet_therm_default {
+ pins = "gpio3";
+ bias-high-impedance;
+ };
+ };
+
+ smb_therm {
+ smb_therm_default: smb_therm_default {
+ pins = "gpio4";
+ bias-high-impedance;
+ };
+ };
+};
+
&pm8953_typec {
status = "disabled";
};
@@ -184,4 +205,19 @@
};
};
};
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmi632_adc_tm 0x53>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
index 1979f4e..5097b7f 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
@@ -311,4 +311,83 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
+};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+ smb_en {
+ smb_en_default: smb_en_default {
+ pins = "gpio2";
+ function = "func1";
+ output-enable;
+ };
+ };
+
+ pmi632_sense {
+ /* GPIO 7 and 8 are external-sense pins for PMI632 */
+ pmi632_sense_default: pmi632_sense_default {
+ pins = "gpio7", "gpio8";
+ bias-high-impedance; /* disable the GPIO */
+ bias-disable; /* no-pull */
+ };
+ };
+};
+
+&tlmm {
+ smb_int_default: smb_int_default {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&smb1355_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_0: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
+
+&smb1355_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_1: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
index e2f2dea..4c4c4bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
@@ -479,7 +479,7 @@
<0 0 0>;
qcom,cpr-voltage-ceiling-override =
- <(-1) (-1) 795000 795000 835000 910000 910000>;
+ <(-1) (-1) 810000 845000 885000 960000 960000>;
qcom,cpr-enable;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index 0e4f666..1448a65 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -50,6 +50,7 @@
qcom,governor-per-policy;
qcom,cpufreq-table-0 =
+ < 960000 >,
< 1305600 >,
< 1497600 >,
< 1708800 >,
@@ -279,6 +280,7 @@
qcom,speed0-bin-v0-c1 =
< 0 0>,
+ < 960000000 1>,
< 1305600000 1>,
< 1497600000 2>,
< 1708800000 3>,
@@ -299,6 +301,7 @@
qcom,speed1-bin-v0-c1 =
< 0 0>,
+ < 960000000 1>,
< 1305600000 1>,
< 1497600000 2>,
< 1708800000 3>,
@@ -325,6 +328,16 @@
vdd_hf_pll-supply = <&pm8953_l7_ao>;
};
+&soc {
+ devfreq_spdm_cpu {
+ status = "disabled";
+ };
+
+ devfreq_spdm_gov {
+ status = "disabled";
+ };
+};
+
&clock_gcc_mdss {
compatible = "qcom,gcc-mdss-sdm439";
clocks = <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_src>,
@@ -341,10 +354,10 @@
reg = <0x001a94400 0x400>,
<0x0184d074 0x8>;
reg-names = "pll_base", "gdsc_base";
- /delete-property/ qcom,dsi-pll-ssc-en;
- /delete-property/ qcom,dsi-pll-ssc-mode;
- /delete-property/ qcom,ssc-frequency-hz;
- /delete-property/ qcom,ssc-ppm;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ qcom,ssc-frequency-hz = <31500>;
+ qcom,ssc-ppm = <5000>;
};
&mdss_dsi1_pll {
@@ -352,10 +365,10 @@
reg = <0x001a96400 0x400>,
<0x0184d074 0x8>;
reg-names = "pll_base", "gdsc_base";
- /delete-property/ qcom,dsi-pll-ssc-en;
- /delete-property/ qcom,dsi-pll-ssc-mode;
- /delete-property/ qcom,ssc-frequency-hz;
- /delete-property/ qcom,ssc-ppm;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ qcom,ssc-frequency-hz = <31500>;
+ qcom,ssc-ppm = <5000>;
};
&mdss_dsi {
@@ -622,3 +635,7 @@
};
};
};
+
+&mdss_mdp {
+ qcom,vbif-settings = <0xd0 0x20>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
index b9aadc1..b73b49a 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
@@ -14,8 +14,8 @@
/dts-v1/;
#include "sdm450.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
#include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
index 64d9e64..07d2e08 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
@@ -46,3 +46,69 @@
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
};
+
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+ smb_en {
+ smb_en_default: smb_en_default {
+ pins = "gpio2";
+ function = "func1";
+ output-enable;
+ };
+ };
+
+ pmi632_sense {
+ /* GPIO 7 and 8 are external-sense pins for PMI632 */
+ pmi632_sense_default: pmi632_sense_default {
+ pins = "gpio7", "gpio8";
+ bias-high-impedance; /* disable the GPIO */
+ bias-disable; /* no-pull */
+ };
+ };
+};
+
+&tlmm {
+ smb_int_default: smb_int_default {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&smb1355_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_0: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
+
+&smb1355_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_1: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
index e09d637..6e39327 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -372,3 +372,24 @@
};
};
};
+
+&tlmm {
+ pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
index 386bd71..a2bd5cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
@@ -177,3 +177,68 @@
};
};
+&i2c_2 {
+#include "smb1355.dtsi"
+};
+
+&pmi632_gpios {
+ smb_en {
+ smb_en_default: smb_en_default {
+ pins = "gpio2";
+ function = "func1";
+ output-enable;
+ };
+ };
+
+ pmi632_sense {
+ /* GPIO 7 and 8 are external-sense pins for PMI632 */
+ pmi632_sense_default: pmi632_sense_default {
+ pins = "gpio7", "gpio8";
+ bias-high-impedance; /* disable the GPIO */
+ bias-disable; /* no-pull */
+ };
+ };
+};
+
+&tlmm {
+ smb_int_default: smb_int_default {
+ mux {
+ pins = "gpio59";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio59";
+ drive-strength = <2>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&smb1355_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_0: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
+
+&smb1355_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default
+ &smb_en_default &pmi632_sense_default>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+ smb1355_charger_1: qcom,smb1355-charger@1000 {
+ status ="ok";
+ /delete-property/ io-channels;
+ /delete-property/ io-channels-names;
+ qcom,parallel-mode = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
index 2669d1f..9d6543f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -24,33 +24,3 @@
qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
index 17ae9d1..60b149d 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
@@ -24,34 +24,3 @@
qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
index 3662cf3..1dd1163 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
@@ -14,8 +14,8 @@
/dts-v1/;
#include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
#include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM632 + PMI632 MTP S3";
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
index 4d68901..e0e6b4b 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
@@ -25,34 +25,3 @@
qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
index 6ca2940..413e85f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
@@ -25,34 +25,3 @@
qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
index d2a9cf1..aea6bff 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-mtp-s3.dts
@@ -14,8 +14,8 @@
/dts-v1/;
#include "sdm632.dtsi"
-#include "sdm450-pmi632-mtp-s3.dtsi"
#include "sdm450-pmi632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
#include "sdm632-pm8004.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
index fe7ab38..68f0ea0 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
@@ -24,32 +24,3 @@
qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
index 14ba3b4..aa20680 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
@@ -13,3 +13,34 @@
#include "sdm450-pmi632-cdp-s2.dtsi"
+&soc {
+ gpio_keys {
+ home {
+ status = "disabled";
+ };
+ };
+};
+
+&tlmm {
+ tlmm_gpio_key {
+ gpio_key_active {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+ };
+
+ gpio_key_suspend {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632.dtsi b/arch/arm64/boot/dts/qcom/sdm632.dtsi
index 67efe0f..4bc1c67 100644
--- a/arch/arm64/boot/dts/qcom/sdm632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632.dtsi
@@ -38,6 +38,16 @@
compatible = "qcom,cc-debug-sdm632";
};
+&soc {
+ devfreq_spdm_cpu {
+ status = "disabled";
+ };
+
+ devfreq_spdm_gov {
+ status = "disabled";
+ };
+};
+
&clock_gcc_gfx {
compatible = "qcom,gcc-gfx-sdm632";
qcom,gfxfreq-corner =
@@ -1005,3 +1015,97 @@
};
#include "sdm632-coresight.dtsi"
+
+/* GPU Overrides */
+&msm_gpu {
+
+ qcom,ca-target-pwrlevel = <4>;
+ qcom,initial-pwrlevel = <5>;
+ /delete-node/qcom,gpu-pwrlevels;
+
+ /* Power levels */
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+ /* TURBO LD0 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <725000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <10>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <10>;
+ };
+
+ /* NOM+ */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <560000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <510000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <10>;
+ };
+
+ /* SVS+ */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <400000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <8>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <320000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <2>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <216000000>;
+ qcom,bus-freq = <1>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <4>;
+ };
+
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <133300000>;
+ qcom,bus-freq = <1>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <4>;
+ };
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 9402294..348ba6f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -411,6 +411,7 @@
qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
nvmem-cells = <&minor_rev>;
nvmem-cell-names = "minor_rev";
+ camnoc-axi-min-ib-bw = <3000000000>;
regulator-names = "camss-vdd";
camss-vdd-supply = <&titan_top_gdsc>;
clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 7764837..dbc3651 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -14,7 +14,10 @@
#include "sdm670-pmic-overlay.dtsi"
#include "sdm670-sde-display.dtsi"
#include "sdm670-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
#include "smb1355.dtsi"
+};
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 43f1465..3b8b375 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -14,9 +14,12 @@
#include "sdm670-camera-sensor-qrd.dtsi"
#include "sdm670-pmic-overlay.dtsi"
#include "sdm670-audio-overlay.dtsi"
-#include "smb1355.dtsi"
#include "sdm670-sde-display.dtsi"
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
&qupv3_se9_2uart {
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 48deca6..5579dab 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -858,6 +858,7 @@
qcom,mdss-dsi-panel-status-value = <0x9c>;
qcom,mdss-dsi-panel-on-check-value = <0x9c>;
qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-cmds-only-by-right;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 4b39207..fbfae4d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -730,6 +730,7 @@
<0x17a60000 0x100000>; /* GICR * 8 */
interrupts = <1 9 4>;
interrupt-parent = <&intc>;
+ ignored-save-restore-irqs = <38>;
};
pdc: interrupt-controller@b220000{
@@ -2216,6 +2217,7 @@
status = "ok";
memory-region = <&pil_modem_mem>;
qcom,mem-protect-id = <0xF>;
+ qcom,complete-ramdump;
/* GPIO inputs from mss */
qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
index d387f93..aa068e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-svr.dtsi
@@ -220,7 +220,7 @@
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
- <&tlmm 8 0>;
+ <&tlmm 7 0>;
gpio-reset = <1>;
gpio-vana = <2>;
gpio-req-tbl-num = <0 1 2>;
@@ -261,7 +261,7 @@
&cam_sensor_front_suspend>;
gpios = <&tlmm 14 0>,
<&tlmm 28 0>,
- <&tlmm 8 0>;
+ <&tlmm 7 0>;
gpio-reset = <1>;
gpio-vana = <2>;
gpio-req-tbl-num = <0 1 2>;
@@ -441,37 +441,39 @@
sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <0>;
- led-flash-src = <&led_flash_iris>;
- cam_vio-supply = <&pm8998_lvs1>;
- cam_vana-supply = <&pmi8998_bob>;
- cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_l9>;
+ cam_vana-supply = <&pm8998_l16>;
+ cam_vdig-supply = <&pm8998_l10>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
rgltr-cntrl-support;
- rgltr-min-voltage = <0 3312000 1050000 0>;
- rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-min-voltage = <1800000 3312000 1800000 0>;
+ rgltr-max-voltage = <1800000 3312000 1800000 0>;
rgltr-load-current = <0 80000 105000 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk3_active
+ pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_iris_active>;
- pinctrl-1 = <&cam_sensor_mclk3_suspend
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_iris_suspend>;
- gpios = <&tlmm 16 0>,
- <&tlmm 9 0>,
- <&tlmm 8 0>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 21 0>,
+ <&tlmm 122 0>,
+ <&tlmm 59 0>;
gpio-reset = <1>;
gpio-vana = <2>;
- gpio-req-tbl-num = <0 1 2>;
- gpio-req-tbl-flags = <1 0 0>;
+ gpio-vdig = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
gpio-req-tbl-label = "CAMIF_MCLK3",
"CAM_RESET3",
- "CAM_VANA1";
+ "CAM_VANA3",
+ "CAM_VDIG3";
sensor-mode = <0>;
cci-master = <1>;
status = "ok";
- clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
clock-cntl-level = "turbo";
clock-rates = <24000000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index fd6a0c7..2e2de74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -381,6 +381,7 @@
interrupt-names = "cpas_camnoc";
interrupts = <0 459 0>;
qcom,cpas-hw-ver = <0x170100>; /* Titan v170 v1.0.0 */
+ camnoc-axi-min-ib-bw = <3000000000>;
regulator-names = "camss-vdd";
camss-vdd-supply = <&titan_top_gdsc>;
clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 349c4c0..812a313 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -14,7 +14,10 @@
#include "sdm845-pmic-overlay.dtsi"
#include "sdm845-pinctrl-overlay.dtsi"
#include "sdm845-camera-sensor-mtp.dtsi"
+
+&qupv3_se10_i2c {
#include "smb1355.dtsi"
+};
&vendor {
bluetooth: bt_wcn3990 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6034b6d..f5a979c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -13,9 +13,12 @@
#include "sdm845-pmic-overlay.dtsi"
#include "sdm845-pinctrl-overlay.dtsi"
#include "sdm845-camera-sensor-qrd.dtsi"
-#include "smb1355.dtsi"
#include <dt-bindings/gpio/gpio.h>
+&qupv3_se10_i2c {
+#include "smb1355.dtsi"
+};
+
&vendor {
bluetooth: bt_wcn3990 {
compatible = "qca,wcn3990";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index a5c6ab5..b2b0000 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -18,7 +18,10 @@
#include "sdm845-pmic-overlay.dtsi"
#include "sdm845-pinctrl-overlay.dtsi"
+
+&qupv3_se10_i2c {
#include "smb1355.dtsi"
+};
&vendor {
bluetooth: bt_wcn3990 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index bfcebf6..6132722 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -258,33 +258,13 @@
/* data and reg bus scale settings */
qcom,sde-data-bus {
- qcom,msm-bus,name = "mdss_sde_mnoc";
+ qcom,msm-bus,name = "mdss_sde";
qcom,msm-bus,num-cases = <3>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <22 773 0 0>, <23 773 0 0>,
- <22 773 0 6400000>, <23 773 0 6400000>,
- <22 773 0 6400000>, <23 773 0 6400000>;
- };
-
- qcom,sde-llcc-bus {
- qcom,msm-bus,name = "mdss_sde_llcc";
- qcom,msm-bus,num-cases = <3>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
- <132 770 0 0>,
- <132 770 0 6400000>,
- <132 770 0 6400000>;
- };
-
- qcom,sde-ebi-bus {
- qcom,msm-bus,name = "mdss_sde_ebi";
- qcom,msm-bus,num-cases = <3>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
- <129 512 0 0>,
- <129 512 0 6400000>,
- <129 512 0 6400000>;
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
};
qcom,sde-reg-bus {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index 85419c8..97cb981 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -302,6 +302,7 @@
interrupt-names = "cpas_camnoc";
interrupts = <0 459 0>;
qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
+ camnoc-axi-min-ib-bw = <3000000000>;
regulator-names = "camss-vdd";
camss-vdd-supply = <&titan_top_gdsc>;
clock-names = "gcc_ahb_clk",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index ba76273..229d06b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -51,7 +51,7 @@
compatible = "qcom,memshare-peripheral";
qcom,peripheral-size = <0x500000>;
qcom,client-id = <1>;
- qcom,allocate-boot-time;
+ qcom,allocate-on-request;
label = "modem";
};
};
@@ -470,6 +470,7 @@
2784000 35000
2803200 40000
2841600 50000
+ 2956800 60000
>;
idle-cost-data = <
100 80 60 40
@@ -537,6 +538,7 @@
2784000 165
2803200 170
2841600 180
+ 2956800 190
>;
idle-cost-data = <
4 3 2 1
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e9a913f..6c71212 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -776,6 +776,7 @@
<0x17a60000 0x100000>; /* GICR * 8 */
interrupts = <1 9 4>;
interrupt-parent = <&intc>;
+ ignored-save-restore-irqs = <38>;
};
pdc: interrupt-controller@b220000{
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index 3412b25d..5939440 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,98 +12,96 @@
#include <dt-bindings/interrupt-controller/irq.h>
-&qupv3_se10_i2c {
- smb1355_0: qcom,smb1355@8 {
- compatible = "qcom,i2c-pmic";
- reg = <0x8>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&spmi_bus>;
- interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
- interrupt_names = "smb1355_0";
- interrupt-controller;
- #interrupt-cells = <3>;
- qcom,periph-map = <0x10 0x12 0x13 0x16>;
+smb1355_0: qcom,smb1355@8 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt_names = "smb1355_0";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x12 0x13 0x16>;
- smb1355_revid_0: qcom,revid@100 {
- compatible = "qcom,qpnp-revid";
- reg = <0x100 0x100>;
- };
-
- smb1355_charger_0: qcom,smb1355-charger@1000 {
- compatible = "qcom,smb1355";
- qcom,pmic-revid = <&smb1355_revid_0>;
- reg = <0x1000 0x700>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&smb1355_0>;
- status = "disabled";
-
- io-channels = <&pmi8998_rradc 2>,
- <&pmi8998_rradc 12>;
- io-channel-names = "charger_temp",
- "charger_temp_max";
-
- qcom,chgr@1000 {
- reg = <0x1000 0x100>;
- interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "chg-state-change";
- };
-
- qcom,chgr-misc@1600 {
- reg = <0x1600 0x100>;
- interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
- <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog-bark",
- "temperature-change";
- };
- };
+ smb1355_revid_0: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
};
- smb1355_1: qcom,smb1355@c {
- compatible = "qcom,i2c-pmic";
- reg = <0xc>;
+ smb1355_charger_0: qcom,smb1355-charger@1000 {
+ compatible = "qcom,smb1355";
+ qcom,pmic-revid = <&smb1355_revid_0>;
+ reg = <0x1000 0x700>;
#address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&spmi_bus>;
- interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
- interrupt_names = "smb1355_1";
- interrupt-controller;
- #interrupt-cells = <3>;
- qcom,periph-map = <0x10 0x12 0x13 0x16>;
+ #size-cells = <1>;
+ interrupt-parent = <&smb1355_0>;
+ status = "disabled";
- smb1355_revid_1: qcom,revid@100 {
- compatible = "qcom,qpnp-revid";
- reg = <0x100 0x100>;
+ io-channels = <&pmi8998_rradc 2>,
+ <&pmi8998_rradc 12>;
+ io-channel-names = "charger_temp",
+ "charger_temp_max";
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "chg-state-change";
};
- smb1355_charger_1: qcom,smb1355-charger@1000 {
- compatible = "qcom,smb1355";
- qcom,pmic-revid = <&smb1355_revid_1>;
- reg = <0x1000 0x700>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&smb1355_1>;
- status = "disabled";
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog-bark",
+ "temperature-change";
+ };
+ };
+};
- io-channels = <&pmi8998_rradc 2>,
- <&pmi8998_rradc 12>;
- io-channel-names = "charger_temp",
- "charger_temp_max";
+smb1355_1: qcom,smb1355@c {
+ compatible = "qcom,i2c-pmic";
+ reg = <0xc>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt_names = "smb1355_1";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x12 0x13 0x16>;
- qcom,chgr@1000 {
- reg = <0x1000 0x100>;
- interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "chg-state-change";
- };
+ smb1355_revid_1: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
- qcom,chgr-misc@1600 {
- reg = <0x1600 0x100>;
- interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
- <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog-bark",
- "temperature-change";
- };
+ smb1355_charger_1: qcom,smb1355-charger@1000 {
+ compatible = "qcom,smb1355";
+ qcom,pmic-revid = <&smb1355_revid_1>;
+ reg = <0x1000 0x700>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&smb1355_1>;
+ status = "disabled";
+
+ io-channels = <&pmi8998_rradc 2>,
+ <&pmi8998_rradc 12>;
+ io-channel-names = "charger_temp",
+ "charger_temp_max";
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "chg-state-change";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog-bark",
+ "temperature-change";
};
};
};
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 1a31ee3..ac6cc3d 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -102,6 +102,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -113,6 +114,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
@@ -307,6 +309,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -344,6 +350,7 @@
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMB5=y
CONFIG_QPNP_SMBCHARGER=y
@@ -365,6 +372,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -575,6 +583,7 @@
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index 2bcdc2c..ced9c40 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -106,6 +106,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -117,6 +118,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
@@ -313,6 +315,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -352,6 +358,7 @@
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMB5=y
CONFIG_QPNP_SMBCHARGER=y
@@ -373,6 +380,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -593,6 +601,7 @@
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 8fece0ee..b46317b 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -18,6 +18,7 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
@@ -72,6 +73,7 @@
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_VHE is not set
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
@@ -103,6 +105,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -114,6 +117,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
@@ -345,6 +349,7 @@
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMB5=y
CONFIG_QPNP_SMBCHARGER=y
@@ -366,6 +371,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -579,12 +585,14 @@
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 0eb9df4..22e3510 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -19,6 +19,7 @@
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
@@ -75,6 +76,7 @@
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_VHE is not set
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
@@ -107,6 +109,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -118,6 +121,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
@@ -354,6 +358,7 @@
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMB5=y
CONFIG_QPNP_SMBCHARGER=y
@@ -375,6 +380,7 @@
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -599,12 +605,14 @@
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_SPDM_SCM=y
CONFIG_DEVFREQ_SPDM=y
+CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_PWM_QTI_LPG=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 8e7c369..bf43e36 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -103,9 +103,11 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -114,6 +116,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 5fb8fdb..72604a5 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -108,9 +108,11 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -119,6 +121,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fe5b5b5..a0a7031 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -105,6 +105,7 @@
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -549,6 +550,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -572,8 +574,11 @@
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -605,6 +610,7 @@
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 666f350..41959a6 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -108,6 +108,7 @@
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -568,6 +569,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_BIMC_BWMON=y
@@ -593,8 +595,11 @@
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -670,6 +675,7 @@
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index c4cc771..f8b5c48 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -228,6 +228,7 @@
}
#define gic_read_typer(c) readq_relaxed_no_log(c)
+#define gic_read_irouter(c) readq_relaxed_no_log(c)
#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab4..f035ff6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 623dd48..69d3266 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -602,16 +602,6 @@
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);
-void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
-{
- unsigned int cpu;
-
- for_each_cpu(cpu, cpumask)
- per_cpu(pending_ipi, cpu) = true;
-
- __smp_cross_call(cpumask, func);
-}
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -779,6 +769,17 @@
__smp_cross_call(target, ipinr);
}
+static void smp_cross_call_common(const struct cpumask *cpumask,
+ unsigned int func)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cpumask)
+ per_cpu(pending_ipi, cpu) = true;
+
+ smp_cross_call(cpumask, func);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
@@ -825,7 +826,8 @@
void arch_irq_work_raise(void)
{
if (__smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+ smp_cross_call_common(cpumask_of(smp_processor_id()),
+ IPI_IRQ_WORK);
}
#endif
diff --git a/block/bio.c b/block/bio.c
index 4f93345..91b6462 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -565,6 +565,15 @@
}
EXPORT_SYMBOL(bio_phys_segments);
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+ dst->bi_crypt_key = src->bi_crypt_key;
+ dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+ dst->bi_dio_inode = src->bi_dio_inode;
+}
+
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
@@ -589,7 +598,8 @@
bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
-
+ bio->bi_dio_inode = bio_src->bi_dio_inode;
+ bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..f44daa1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,9 +6,9 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
#include <trace/events/block.h>
-
+#include <linux/pfk.h>
#include "blk.h"
static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -725,6 +725,11 @@
}
}
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+ return (!pfk_allow_merge_bio(bio, nxt));
+}
+
/*
* Has to be called with the request spinlock acquired
*/
@@ -752,6 +757,8 @@
!blk_write_same_mergeable(req->bio, next->bio))
return 0;
+ if (crypto_not_mergeable(req->bio, next->bio))
+ return 0;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@
!blk_write_same_mergeable(rq->bio, bio))
return false;
+ if (crypto_not_mergeable(rq->bio, bio))
+ return false;
return true;
}
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a..6dd2ca4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -425,7 +425,10 @@
/*
* First try one-hit cache.
*/
- if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
+ if (q->last_merge) {
+ if (!elv_bio_merge_ok(q->last_merge, bio))
+ return ELEVATOR_NO_MERGE;
+
ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ac43d6f..4272868 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -876,8 +876,8 @@
struct device_node *of_node = dev_of_node(dev);
int error;
- if (of_node) {
- error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+ if (of_node && of_node_kobj(of_node)) {
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
/* An error here doesn't warrant bringing down the device */
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4d734bf..7d8605b 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2764,6 +2764,7 @@
mutex_unlock(&fl->perf_mutex);
mutex_destroy(&fl->perf_mutex);
mutex_destroy(&fl->fl_map_mutex);
+ mutex_destroy(&fl->map_mutex);
kfree(fl);
return 0;
}
@@ -2777,7 +2778,6 @@
pm_qos_remove_request(&fl->pm_qos_req);
if (fl->debugfs_file != NULL)
debugfs_remove(fl->debugfs_file);
- mutex_destroy(&fl->map_mutex);
fastrpc_file_free(fl);
file->private_data = NULL;
}
@@ -3259,6 +3259,28 @@
if (err)
goto bail;
break;
+ case FASTRPC_IOCTL_MMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.mmap, param,
+ sizeof(p.mmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+ if (err)
+ goto bail;
+ K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MUNMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.munmap, param,
+ sizeof(p.munmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+ &p.munmap)));
+ if (err)
+ goto bail;
+ break;
case FASTRPC_IOCTL_MUNMAP_FD:
K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
sizeof(p.munmap_fd));
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 0f07483..804ceda 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,10 @@
_IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
#define COMPAT_FASTRPC_IOCTL_CONTROL \
_IOWR('R', 12, struct compat_fastrpc_ioctl_control)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+ _IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+ _IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
struct compat_remote_buf {
compat_uptr_t pv; /* buffer pointer */
@@ -82,11 +86,24 @@
compat_uptr_t vaddrout; /* dsps virtual address */
};
+struct compat_fastrpc_ioctl_mmap_64 {
+ compat_int_t fd; /* ion fd */
+ compat_uint_t flags; /* flags for dsp to map with */
+ compat_u64 vaddrin; /* optional virtual address */
+ compat_size_t size; /* size */
+ compat_u64 vaddrout; /* dsps virtual address */
+};
+
struct compat_fastrpc_ioctl_munmap {
compat_uptr_t vaddrout; /* address to unmap */
compat_size_t size; /* size */
};
+struct compat_fastrpc_ioctl_munmap_64 {
+ compat_u64 vaddrout; /* address to unmap */
+ compat_size_t size; /* size */
+};
+
struct compat_fastrpc_ioctl_init {
compat_uint_t flags; /* one of FASTRPC_INIT_* macros */
compat_uptr_t file; /* pointer to elf file */
@@ -206,6 +223,28 @@
return err;
}
+static int compat_get_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_uint_t u;
+ compat_int_t i;
+ compat_size_t s;
+ compat_u64 p;
+ int err;
+
+ err = get_user(i, &map32->fd);
+ err |= put_user(i, &map->fd);
+ err |= get_user(u, &map32->flags);
+ err |= put_user(u, &map->flags);
+ err |= get_user(p, &map32->vaddrin);
+ err |= put_user(p, &map->vaddrin);
+ err |= get_user(s, &map32->size);
+ err |= put_user(s, &map->size);
+
+ return err;
+}
+
static int compat_put_fastrpc_ioctl_mmap(
struct compat_fastrpc_ioctl_mmap __user *map32,
struct fastrpc_ioctl_mmap __user *map)
@@ -219,6 +258,19 @@
return err;
}
+static int compat_put_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_u64 p;
+ int err;
+
+ err = get_user(p, &map->vaddrout);
+ err |= put_user(p, &map32->vaddrout);
+
+ return err;
+}
+
static int compat_get_fastrpc_ioctl_munmap(
struct compat_fastrpc_ioctl_munmap __user *unmap32,
struct fastrpc_ioctl_munmap __user *unmap)
@@ -235,6 +287,22 @@
return err;
}
+static int compat_get_fastrpc_ioctl_munmap_64(
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32,
+ struct fastrpc_ioctl_munmap __user *unmap)
+{
+ compat_u64 p;
+ compat_size_t s;
+ int err;
+
+ err = get_user(p, &unmap32->vaddrout);
+ err |= put_user(p, &unmap->vaddrout);
+ err |= get_user(s, &unmap32->size);
+ err |= put_user(s, &unmap->size);
+
+ return err;
+}
+
static int compat_get_fastrpc_ioctl_perf(
struct compat_fastrpc_ioctl_perf __user *perf32,
struct fastrpc_ioctl_perf __user *perf)
@@ -355,6 +423,27 @@
VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
return err;
}
+ case COMPAT_FASTRPC_IOCTL_MMAP_64:
+ {
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32;
+ struct fastrpc_ioctl_mmap __user *map;
+ long ret;
+
+ map32 = compat_ptr(arg);
+ VERIFY(err, NULL != (map = compat_alloc_user_space(
+ sizeof(*map))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64,
+ (unsigned long)map);
+ if (ret)
+ return ret;
+ VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map));
+ return err;
+ }
case COMPAT_FASTRPC_IOCTL_MUNMAP:
{
struct compat_fastrpc_ioctl_munmap __user *unmap32;
@@ -372,6 +461,23 @@
return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
(unsigned long)unmap);
}
+ case COMPAT_FASTRPC_IOCTL_MUNMAP_64:
+ {
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32;
+ struct fastrpc_ioctl_munmap __user *unmap;
+
+ unmap32 = compat_ptr(arg);
+ VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+ sizeof(*unmap))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32,
+ unmap));
+ if (err)
+ return err;
+ return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64,
+ (unsigned long)unmap);
+ }
case COMPAT_FASTRPC_IOCTL_INIT:
/* fall through */
case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index de0dd01..952b87c 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -19,6 +19,8 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap)
#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64)
+#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64)
#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t)
#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init)
@@ -204,6 +206,11 @@
size_t size; /* size */
};
+struct fastrpc_ioctl_munmap_64 {
+ uint64_t vaddrout; /* address to unmap */
+ size_t size; /* size */
+};
+
struct fastrpc_ioctl_mmap {
int fd; /* ion fd */
uint32_t flags; /* flags for dsp to map with */
@@ -212,6 +219,14 @@
uintptr_t vaddrout; /* dsps virtual address */
};
+struct fastrpc_ioctl_mmap_64 {
+ int fd; /* ion fd */
+ uint32_t flags; /* flags for dsp to map with */
+ uint64_t vaddrin; /* optional virtual address */
+ size_t size; /* size */
+ uint64_t vaddrout; /* dsps virtual address */
+};
+
struct fastrpc_ioctl_munmap_fd {
int fd; /* fd */
uint32_t flags; /* control flags */
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 286418f..a089e7c 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2713,7 +2713,7 @@
}
}
-static int diag_dci_init_remote(void)
+int diag_dci_init_remote(void)
{
int i;
struct dci_ops_tbl_t *temp = NULL;
@@ -2740,11 +2740,6 @@
return 0;
}
-#else
-static int diag_dci_init_remote(void)
-{
- return 0;
-}
#endif
static int diag_dci_init_ops_tbl(void)
@@ -2754,10 +2749,6 @@
err = diag_dci_init_local();
if (err)
goto err;
- err = diag_dci_init_remote();
- if (err)
- goto err;
-
return 0;
err:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 61eb3f5..2fb0e3f 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -323,6 +323,7 @@
int diag_dci_write_bridge(int token, unsigned char *buf, int len);
int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
int diag_dci_send_handshake_pkt(int index);
+int diag_dci_init_remote(void);
#endif
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54a4d98..a169230 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -970,6 +970,8 @@
diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
poolsize_qsc_usb);
diag_md_mdm_init();
+ if (diag_dci_init_remote())
+ return -ENOMEM;
driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
if (!driver->hdlc_encode_buf)
return -ENOMEM;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
index f2ed36c..0bdfeeb 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
@@ -165,7 +165,8 @@
return sel;
}
-static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll)
+static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll,
+ bool is_handoff)
{
u32 status;
bool pll_locked;
@@ -177,8 +178,9 @@
((status & BIT(1)) > 0),
DSI_PLL_POLL_MAX_READS,
DSI_PLL_POLL_TIMEOUT_US)) {
- pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
- pll->index, status);
+ if (!is_handoff)
+ pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+ pll->index, status);
pll_locked = false;
} else {
pll_locked = true;
@@ -213,7 +215,7 @@
wmb(); /* make sure register committed before enabling branch clocks */
udelay(50); /* h/w recommended delay */
- if (!pll_is_pll_locked_12nm(pll)) {
+ if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
@@ -261,7 +263,7 @@
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
ndelay(500); /* h/w recommended delay */
- if (!pll_is_pll_locked_12nm(pll)) {
+ if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
@@ -556,6 +558,142 @@
param->gmp_cntrl = 0x1;
}
+static u32 __mdss_dsi_get_multi_intX100(u64 vco_rate, u32 *rem)
+{
+ u32 reminder = 0;
+ u64 temp = 0;
+ const u32 ref_clk_rate = 19200000, quarterX100 = 25;
+
+ temp = div_u64_rem(vco_rate, ref_clk_rate, &reminder);
+ temp *= 100;
+
+ /*
+ * Multiplication integer needs to be floored in steps of 0.25
+ * Hence multi_intX100 needs to be rounded off in steps of 25
+ */
+ if (reminder < (ref_clk_rate / 4)) {
+ *rem = reminder;
+ return temp;
+ } else if ((reminder >= (ref_clk_rate / 4)) &&
+ reminder < (ref_clk_rate / 2)) {
+ *rem = (reminder - (ref_clk_rate / 4));
+ return (temp + quarterX100);
+ } else if ((reminder >= (ref_clk_rate / 2)) &&
+ (reminder < ((3 * ref_clk_rate) / 4))) {
+ *rem = (reminder - (ref_clk_rate / 2));
+ return (temp + (quarterX100 * 2));
+ }
+
+ *rem = (reminder - ((3 * ref_clk_rate) / 4));
+ return (temp + (quarterX100 * 3));
+}
+
+static u32 __calc_gcd(u32 num1, u32 num2)
+{
+ if (num2 != 0)
+ return __calc_gcd(num2, (num1 % num2));
+ else
+ return num1;
+}
+
+static void mdss_dsi_pll_12nm_calc_ssc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ struct dsi_pll_param *param = &pdb->param;
+ u64 multi_intX100 = 0, temp = 0;
+ u32 temp_rem1 = 0, temp_rem2 = 0;
+ const u64 power_2_17 = 131072, power_2_10 = 1024;
+ const u32 ref_clk_rate = 19200000;
+
+ multi_intX100 = __mdss_dsi_get_multi_intX100(pll->vco_current_rate,
+ &temp_rem1);
+
+ /* Calculation for mpll_ssc_peak_i */
+ temp = (multi_intX100 * pll->ssc_ppm * power_2_17);
+ temp = div_u64(temp, 100); /* 100 div for multi_intX100 */
+ param->mpll_ssc_peak_i =
+ (u32) div_u64(temp, 1000000); /* 10^6 for SSC PPM */
+
+ /* Calculation for mpll_stepsize_i */
+ param->mpll_stepsize_i = (u32) div_u64((param->mpll_ssc_peak_i *
+ pll->ssc_freq * power_2_10), ref_clk_rate);
+
+ /* Calculation for mpll_mint_i */
+ param->mpll_mint_i = (u32) (div_u64((multi_intX100 * 4), 100) - 32);
+
+ /* Calculation for mpll_frac_den */
+ param->mpll_frac_den = (u32) div_u64(ref_clk_rate,
+ __calc_gcd((u32)pll->vco_current_rate, ref_clk_rate));
+
+ /* Calculation for mpll_frac_quot_i */
+ temp = (temp_rem1 * power_2_17);
+ param->mpll_frac_quot_i =
+ (u32) div_u64_rem(temp, ref_clk_rate, &temp_rem2);
+
+ /* Calculation for mpll_frac_rem */
+ param->mpll_frac_rem = (u32) div_u64(((u64)temp_rem2 *
+ param->mpll_frac_den), ref_clk_rate);
+
+ pr_debug("mpll_ssc_peak_i=%d mpll_stepsize_i=%d mpll_mint_i=%d\n",
+ param->mpll_ssc_peak_i, param->mpll_stepsize_i,
+ param->mpll_mint_i);
+ pr_debug("mpll_frac_den=%d mpll_frac_quot_i=%d mpll_frac_rem=%d",
+ param->mpll_frac_den, param->mpll_frac_quot_i,
+ param->mpll_frac_rem);
+}
+
+static void pll_db_commit_12nm_ssc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_param *param = &pdb->param;
+ char data = 0;
+
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, 0x27);
+
+ data = (param->mpll_mint_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC7, data);
+
+ data = ((param->mpll_mint_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC8, data);
+
+ data = (param->mpll_ssc_peak_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC1, data);
+
+ data = ((param->mpll_ssc_peak_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC2, data);
+
+ data = ((param->mpll_ssc_peak_i & 0xf0000) >> 16);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC3, data);
+
+ data = (param->mpll_stepsize_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC4, data);
+
+ data = ((param->mpll_stepsize_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC5, data);
+
+ data = ((param->mpll_stepsize_i & 0x1f0000) >> 16);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC6, data);
+
+ data = (param->mpll_frac_quot_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC10, data);
+
+ data = ((param->mpll_frac_quot_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC11, data);
+
+ data = (param->mpll_frac_rem & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC12, data);
+
+ data = ((param->mpll_frac_rem & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC13, data);
+
+ data = (param->mpll_frac_den & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC14, data);
+
+ data = ((param->mpll_frac_den & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC15, data);
+}
+
static void pll_db_commit_12nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
@@ -616,6 +754,9 @@
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PRO_DLY_RELOCK, 0x0c);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_DET_MODE_SEL, 0x02);
+ if (pll->ssc_en)
+ pll_db_commit_12nm_ssc(pll, pdb);
+
pr_debug("pll:%d\n", pll->index);
wmb(); /* make sure register committed before preparing the clocks */
}
@@ -710,7 +851,7 @@
return ret;
}
- if (pll_is_pll_locked_12nm(pll)) {
+ if (pll_is_pll_locked_12nm(pll, true)) {
pll->handoff_resources = true;
pll->pll_on = true;
c->rate = pll_vco_get_rate_12nm(c);
@@ -756,18 +897,27 @@
rc, pll->index);
goto error;
}
+ }
- data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
- if (data & BIT(7)) { /* DSI PHY in LP-11 or ULPS */
- rc = dsi_pll_relock(pll);
- if (rc)
- goto error;
- else
- goto end;
- }
+ /*
+ * For cases where DSI PHY is already enabled like:
+ * 1.) LP-11 during static screen
+ * 2.) ULPS during static screen
+ * 3.) Boot up with cont splash enabled where PHY is programmed in LK
+ * Execute the Re-lock sequence to enable the DSI PLL.
+ */
+ data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
+ if (data & BIT(7)) {
+ rc = dsi_pll_relock(pll);
+ if (rc)
+ goto error;
+ else
+ goto end;
}
mdss_dsi_pll_12nm_calc_reg(pll, pdb);
+ if (pll->ssc_en)
+ mdss_dsi_pll_12nm_calc_ssc(pll, pdb);
/* commit DSI vco */
pll_db_commit_12nm(pll, pdb);
@@ -802,6 +952,7 @@
{
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct mdss_pll_resources *pll = vco->priv;
+ u32 data = 0;
if (!pll) {
pr_err("Dsi pll resources are not available\n");
@@ -813,7 +964,9 @@
return -EINVAL;
}
- MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, 0x40);
+ data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC0);
+ data |= BIT(6); /* enable GP_CLK_EN */
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, data);
wmb(); /* make sure register committed before enabling branch clocks */
return 0;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
index 210742b..5d2fa9a 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
@@ -636,11 +636,13 @@
};
static struct clk_lookup mdss_dsi_pllcc_12nm[] = {
+ CLK_LIST(dsi0pll_vco_clk),
CLK_LIST(dsi0pll_byte_clk_src),
CLK_LIST(dsi0pll_pixel_clk_src),
};
static struct clk_lookup mdss_dsi_pllcc_12nm_1[] = {
+ CLK_LIST(dsi1pll_vco_clk),
CLK_LIST(dsi1pll_byte_clk_src),
CLK_LIST(dsi1pll_pixel_clk_src),
};
@@ -650,6 +652,9 @@
{
int rc = 0, ndx;
struct dsi_pll_db *pdb;
+ int const ssc_freq_min = 30000; /* min. recommended freq. value */
+ int const ssc_freq_max = 33000; /* max. recommended freq. value */
+ int const ssc_ppm_max = 5000; /* max. recommended ppm */
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid input parameters\n");
@@ -680,6 +685,21 @@
pixel_div_clk_src_ops = clk_ops_div;
pixel_div_clk_src_ops.prepare = dsi_pll_div_prepare;
+ if (pll_res->ssc_en) {
+ if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+ (pll_res->ssc_freq > ssc_freq_max)) {
+ pll_res->ssc_freq = ssc_freq_min;
+ pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+ pll_res->ssc_freq);
+ }
+
+ if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+ pll_res->ssc_ppm = ssc_ppm_max;
+ pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+ pll_res->ssc_ppm);
+ }
+ }
+
/* Set client data to mux, div and vco clocks. */
if (pll_res->index == DSI_PLL_1) {
dsi1pll_byte_clk_src.priv = pll_res;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
index 6912ff4..0974717 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
@@ -37,6 +37,22 @@
#define DSIPHY_PLL_LOOP_DIV_RATIO_1 0x2e8
#define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1 0x328
#define DSIPHY_SSC0 0x394
+#define DSIPHY_SSC7 0x3b0
+#define DSIPHY_SSC8 0x3b4
+#define DSIPHY_SSC1 0x398
+#define DSIPHY_SSC2 0x39c
+#define DSIPHY_SSC3 0x3a0
+#define DSIPHY_SSC4 0x3a4
+#define DSIPHY_SSC5 0x3a8
+#define DSIPHY_SSC6 0x3ac
+#define DSIPHY_SSC10 0x360
+#define DSIPHY_SSC11 0x364
+#define DSIPHY_SSC12 0x368
+#define DSIPHY_SSC13 0x36c
+#define DSIPHY_SSC14 0x370
+#define DSIPHY_SSC15 0x374
+#define DSIPHY_SSC7 0x3b0
+#define DSIPHY_SSC8 0x3b4
#define DSIPHY_SSC9 0x3b8
#define DSIPHY_STAT0 0x3e0
#define DSIPHY_CTRL0 0x3e8
@@ -58,6 +74,14 @@
u32 post_div_mux;
u32 pixel_divhf;
u32 fsm_ovr_ctrl;
+
+ /* SSC (spread-spectrum clocking) parameters */
+ u32 mpll_ssc_peak_i;
+ u32 mpll_stepsize_i;
+ u32 mpll_mint_i;
+ u32 mpll_frac_den;
+ u32 mpll_frac_quot_i;
+ u32 mpll_frac_rem;
};
enum {
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 836c25c..7f9ba03 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -833,6 +833,7 @@
};
static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(8000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 6),
F(19200000, P_BI_TCXO, 1, 0, 0),
F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2),
F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d82ce73..7279448 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1654,7 +1654,10 @@
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+
+ if (!policy_is_inactive(policy))
+ ret_freq = __cpufreq_get(policy);
+
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2397,6 +2400,11 @@
down_write(&policy->rwsem);
+ if (policy_is_inactive(policy)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
new_policy.min = policy->user_policy.min;
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cb3c48a..99ad22e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -27,4 +27,8 @@
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+ifeq ($(CONFIG_MSM_PM_LEGACY), y)
+obj-y += lpm-levels-legacy.o lpm-levels-of-legacy.o lpm-workarounds.o
+else
obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o
+endif
diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c
new file mode 100644
index 0000000..006a5ef
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.c
@@ -0,0 +1,1533 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/coresight-cti.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/jtag.h>
+#include <asm/cputype.h>
+#include <asm/arch_timer.h>
+#include <asm/cacheflush.h>
+#include <asm/suspend.h>
+#include "lpm-levels-legacy.h"
+#include "lpm-workarounds.h"
+#include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+#if defined(CONFIG_COMMON_CLK)
+#include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
+#include <soc/qcom/minidump.h>
+
+#define SCLK_HZ (32768)
+#define SCM_HANDOFF_LOCK_ID "S:7"
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+static remote_spinlock_t scm_handoff_lock;
+
+enum {
+ MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+ MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+ CPU_ENTER,
+ CPU_EXIT,
+ CLUSTER_ENTER,
+ CLUSTER_EXIT,
+ PRE_PC_CB,
+ CPU_HP_STARTING,
+ CPU_HP_DYING,
+};
+
+struct lpm_debug {
+ cycle_t time;
+ enum debug_event evt;
+ int cpu;
+ uint32_t arg1;
+ uint32_t arg2;
+ uint32_t arg3;
+ uint32_t arg4;
+};
+
+static struct system_pm_ops *sys_pm_ops;
+struct lpm_cluster *lpm_root_node;
+
+static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+
+static const int num_dbg_elements = 0x100;
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t time);
+
+static bool menu_select;
+module_param_named(
+ menu_select, menu_select, bool, 0664
+);
+
+static bool print_parsed_dt;
+module_param_named(
+ print_parsed_dt, print_parsed_dt, bool, 0664
+);
+
+static bool sleep_disabled;
+module_param_named(sleep_disabled,
+ sleep_disabled, bool, 0664);
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+ return 10;
+}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
+
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{
+ if (sys_pm_ops)
+ return -EUSERS;
+
+ sys_pm_ops = pm_ops;
+
+ return 0;
+}
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+ struct latency_level *lat_level)
+{
+ struct list_head *list;
+ struct lpm_cluster_level *level;
+ struct lpm_cluster *n;
+ struct power_params *pwr_params;
+ uint32_t latency = 0;
+ int i;
+
+ if (!cluster->list.next) {
+ for (i = 0; i < cluster->nlevels; i++) {
+ level = &cluster->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level == level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency = pwr_params->latency_us;
+ break;
+ }
+ }
+ } else {
+ list_for_each(list, &cluster->parent->child) {
+ n = list_entry(list, typeof(*n), list);
+ if (lat_level->level_name) {
+ if (strcmp(lat_level->level_name,
+ n->cluster_name))
+ continue;
+ }
+ for (i = 0; i < n->nlevels; i++) {
+ level = &n->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level ==
+ level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency =
+ pwr_params->latency_us;
+ break;
+ }
+ }
+ }
+ }
+ return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+ struct latency_level *lat_level)
+{
+ struct list_head *list;
+ struct lpm_cpu_level *level;
+ struct power_params *pwr_params;
+ struct lpm_cpu *cpu;
+ struct lpm_cluster *n;
+ uint32_t latency = 0;
+ int i;
+
+ list_for_each(list, child) {
+ n = list_entry(list, typeof(*n), list);
+ if (lat_level->level_name) {
+ if (strcmp(lat_level->level_name, n->cluster_name))
+ continue;
+ }
+ cpu = n->cpu;
+ for (i = 0; i < cpu->nlevels; i++) {
+ level = &cpu->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level == level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency = pwr_params->latency_us;
+ break;
+ }
+ }
+ }
+ return latency;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+ int affinity_level)
+{
+ struct lpm_cluster *n;
+
+ if ((cluster->aff_level == affinity_level)
+ || ((cluster->cpu) && (affinity_level == 0)))
+ return cluster;
+ else if (!cluster->cpu) {
+ n = list_entry(cluster->child.next, typeof(*n), list);
+ return cluster_aff_match(n, affinity_level);
+ } else
+ return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+ struct lpm_cluster *cluster;
+ uint32_t val;
+
+ if (!lpm_root_node) {
+ pr_err("%s: lpm_probe not completed\n", __func__);
+ return -EAGAIN;
+ }
+
+ if ((level->affinity_level < 0)
+ || (level->affinity_level > lpm_root_node->aff_level)
+ || (level->reset_level < LPM_RESET_LVL_RET)
+ || (level->reset_level > LPM_RESET_LVL_PC)
+ || !latency)
+ return -EINVAL;
+
+ cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+ if (!cluster) {
+ pr_err("%s:No matching cluster found for affinity_level:%d\n",
+ __func__, level->affinity_level);
+ return -EINVAL;
+ }
+
+ if (level->affinity_level == 0)
+ val = least_cpu_latency(&cluster->parent->child, level);
+ else
+ val = least_cluster_latency(cluster, level);
+
+ if (!val) {
+ pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
+ __func__, level->affinity_level, level->reset_level);
+ return -EINVAL;
+ }
+
+ *latency = val;
+
+ return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+ struct lpm_debug *dbg;
+ int idx;
+ static DEFINE_SPINLOCK(debug_lock);
+ static int pc_event_index;
+
+ if (!lpm_debug)
+ return;
+
+ spin_lock(&debug_lock);
+ idx = pc_event_index++;
+ dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+ dbg->evt = event;
+ dbg->time = arch_counter_get_cntpct();
+ dbg->cpu = raw_smp_processor_id();
+ dbg->arg1 = arg1;
+ dbg->arg2 = arg2;
+ dbg->arg3 = arg3;
+ dbg->arg4 = arg4;
+ spin_unlock(&debug_lock);
+}
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+ return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+ u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+ ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+ lpm_hrtimer.function = lpm_hrtimer_cb;
+ hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ int lpm = mode;
+ int rc = 0;
+ bool notify_rpm = level->notify_rpm;
+ struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+ smp_processor_id())->lpm_dev;
+
+ if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
+ cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
+ coresight_cti_ctx_restore();
+
+ switch (mode) {
+ case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_FASTPC:
+ if (level->no_cache_flush)
+ cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+ else
+ cpu_ops->tz_flag = MSM_SCM_L2_OFF;
+ coresight_cti_ctx_save();
+ break;
+ case MSM_SPM_MODE_GDHS:
+ cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+ coresight_cti_ctx_save();
+ break;
+ case MSM_SPM_MODE_CLOCK_GATING:
+ case MSM_SPM_MODE_RETENTION:
+ case MSM_SPM_MODE_DISABLED:
+ cpu_ops->tz_flag = MSM_SCM_L2_ON;
+ break;
+ default:
+ cpu_ops->tz_flag = MSM_SCM_L2_ON;
+ lpm = MSM_SPM_MODE_DISABLED;
+ break;
+ }
+
+ if (lpm_wa_get_skip_l2_spm())
+ rc = msm_spm_config_low_power_mode_addr(ops->spm, lpm,
+ notify_rpm);
+ else
+ rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
+
+ if (rc)
+ pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+ __func__, lpm, rc);
+
+ return rc;
+}
+
+int set_l3_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ bool notify_rpm = level->notify_rpm;
+ struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+ smp_processor_id())->lpm_dev;
+
+ switch (mode) {
+ case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_FASTPC:
+ cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
+ break;
+ default:
+ break;
+ }
+ return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+
+int set_system_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ bool notify_rpm = level->notify_rpm;
+
+ return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
+ struct lpm_cluster_level *level)
+{
+ struct low_power_ops *ops;
+
+ if (use_psci)
+ return 0;
+
+ ops = &cluster->lpm_dev[ndevice];
+ if (ops && ops->set_mode)
+ return ops->set_mode(ops, level->mode[ndevice],
+ level);
+ else
+ return -EINVAL;
+}
+
/*
 * cpu_power_select() - choose the deepest CPU idle level consistent with
 * the PM QoS latency bound and the expected sleep time.
 * @dev: cpuidle device for this CPU
 * @cpu: per-CPU LPM level table (may be NULL for cluster-only nodes)
 *
 * Returns the chosen level index (0, the shallowest, when idle is
 * disallowed or @cpu is NULL). May also arm a wakeup timer when the next
 * timer event is sooner than the tick-derived sleep length allows.
 */
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i;
	uint32_t lvl_latency_us = 0;
	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);

	if (!cpu)
		return best_level;

	/* Sleep disabled (unless CPU is isolated) or bad sleep length: WFI */
	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return 0;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	/* Levels are ordered shallow to deep; stop at the first that fails */
	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		uint32_t next_wakeup_us = (uint32_t)sleep_us;
		enum msm_pm_sleep_mode mode = level->mode;
		bool allow;

		allow = lpm_cpu_mode_allow(dev->cpu, i, true);

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		/* QoS bound tighter than this level's entry+exit latency */
		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			/* Next event fires before we could even enter/exit */
			if (next_event_us < lvl_latency_us)
				break;

			/* Shrink the wakeup window to the real next event */
			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		best_level = i;

		/*
		 * When the next event is earlier than the tick-based sleep
		 * estimate, remember an adjusted wakeup time (WFI excluded,
		 * since it wakes on any interrupt anyway).
		 */
		if (next_event_us && next_event_us < sleep_us &&
				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
			modified_time_us
				= next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		/* Sleep window shorter than this level's break-even point */
		if (next_wakeup_us <= residency[i])
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	return best_level;
}
+
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+ struct cpumask *mask, bool from_idle)
+{
+ int cpu;
+ int next_cpu = raw_smp_processor_id();
+ ktime_t next_event;
+ struct cpumask online_cpus_in_cluster;
+
+ next_event.tv64 = KTIME_MAX;
+ if (!from_idle) {
+ if (mask)
+ cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
+ return ~0ULL;
+ }
+
+ cpumask_and(&online_cpus_in_cluster,
+ &cluster->num_children_in_sync, cpu_online_mask);
+
+ for_each_cpu(cpu, &online_cpus_in_cluster) {
+ ktime_t *next_event_c;
+
+ next_event_c = get_next_event_cpu(cpu);
+ if (next_event_c->tv64 < next_event.tv64) {
+ next_event.tv64 = next_event_c->tv64;
+ next_cpu = cpu;
+ }
+ }
+
+ if (mask)
+ cpumask_copy(mask, cpumask_of(next_cpu));
+
+
+ if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+ return ktime_to_us(ktime_sub(next_event, ktime_get()));
+ else
+ return 0;
+}
+
/*
 * cluster_select() - pick the deepest cluster low power level permitted by
 * QoS latency, expected sleep time, CPU votes, and mode availability.
 * @cluster:   cluster to evaluate
 * @from_idle: true on the idle path, false on hotplug/suspend
 *
 * Returns the chosen level index, -1 when no level qualifies, or -EINVAL
 * for a NULL cluster. Must be called with cluster->sync_lock held by the
 * caller's convention (see cluster_prepare()).
 */
static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);

	/* Tightest PM QoS latency bound across the cluster's online CPUs */
	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	/*
	 * If atleast one of the core in the cluster is online, the cluster
	 * low power modes should be determined by the idle characteristics
	 * even if the last core enters the low power mode as a part of
	 * hotplug.
	 */

	if (!from_idle && num_online_cpus() > 1 &&
		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
		from_idle = true;

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		/* Some levels are valid only when a single core remains */
		if (level->last_core_only &&
			cpumask_weight(cpu_online_mask) > 1)
			continue;

		/* All children must have voted for at least this level */
		if (!cpumask_equal(&cluster->num_children_in_sync,
					&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		/* RPM-notifying levels are skipped while suspend is underway */
		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->notify_rpm) {
			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
				continue;
			if (!sys_pm_ops->sleep_allowed())
				continue;
		}

		best_level = i;

		/* Deeper levels can't amortize: stop at the break-even point */
		if (from_idle && sleep_us <= pwr_params->max_residency)
			break;
	}

	return best_level;
}
+
+static void cluster_notify(struct lpm_cluster *cluster,
+ struct lpm_cluster_level *level, bool enter)
+{
+ if (level->is_reset && enter)
+ cpu_cluster_pm_enter(cluster->aff_level);
+ else if (level->is_reset && !enter)
+ cpu_cluster_pm_exit(cluster->aff_level);
+}
+
+static unsigned int get_next_online_cpu(bool from_idle)
+{
+ unsigned int cpu;
+ ktime_t next_event;
+ unsigned int next_cpu = raw_smp_processor_id();
+
+ if (!from_idle)
+ return next_cpu;
+ next_event.tv64 = KTIME_MAX;
+ for_each_online_cpu(cpu) {
+ ktime_t *next_event_c;
+
+ next_event_c = get_next_event_cpu(cpu);
+ if (next_event_c->tv64 < next_event.tv64) {
+ next_event.tv64 = next_event_c->tv64;
+ next_cpu = cpu;
+ }
+ }
+ return next_cpu;
+}
+
/*
 * cluster_configure() - program every low power device in the cluster for
 * level @idx, handle RPM notification, and publish the chosen level.
 * @cluster:   cluster being configured (sync_lock held by caller)
 * @idx:       index into cluster->levels
 * @from_idle: true on the idle path
 *
 * Returns 0 on success; -EPERM when the children are no longer all in
 * sync or an IPI is pending; -EBUSY when the system PM enter hook fails;
 * or the set_device_mode() error (after reverting devices to the default
 * level).
 */
static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];
	struct cpumask cpumask;
	unsigned int cpu;
	int ret, i;

	/* Bail if a child raced out of sync or a cross-CPU IPI is pending */
	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&cluster->num_children_in_sync)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);
	}

	for (i = 0; i < cluster->ndevices; i++) {
		ret = set_device_mode(cluster, i, level);
		if (ret)
			goto failed_set_mode;
	}

	if (level->notify_rpm) {
		struct cpumask *nextcpu;

		/* Tell the RPM which CPU's timer should wake the system */
		cpu = get_next_online_cpu(from_idle);
		cpumask_copy(&cpumask, cpumask_of(cpu));
		nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;

		/*
		 * NOTE(review): on enter() failure the devices configured
		 * above are left at the new level (no revert as in
		 * failed_set_mode) — confirm this is intentional.
		 */
		if (sys_pm_ops && sys_pm_ops->enter)
			if ((sys_pm_ops->enter(nextcpu)))
				return -EBUSY;

		if (cluster->no_saw_devices && !use_psci)
			msm_spm_set_rpm_hs(true);
	}

	/* Notify cluster enter event after successfully config completion */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;
	return 0;

failed_set_mode:

	/* Roll every device back to the default (safe) level */
	for (i = 0; i < cluster->ndevices; i++) {
		int rc = 0;

		level = &cluster->levels[cluster->default_level];
		rc = set_device_mode(cluster, i, level);
		WARN_ON(rc);
	}
	return ret;
}
+
/*
 * cluster_prepare() - record @cpu's vote for a cluster low power level and,
 * once every child is in sync, select and configure the level, then recurse
 * into the parent cluster.
 * @cluster:    cluster to prepare (NULL terminates the recursion)
 * @cpu:        mask of CPUs casting this vote
 * @child_idx:  the level index the child entered
 * @from_idle:  true on the idle path
 * @start_time: ns timestamp stored for sleep-time accounting
 *
 * Takes cluster->sync_lock; on the success path the lock is held across
 * cluster_configure() and the recursive parent prepare, and released
 * afterwards. On any failure the vote stays recorded but sleep_time is
 * cleared.
 */
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;

	if (!cluster)
		return;

	/* Child level too shallow to allow any cluster level */
	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes. So its ok
	 * to release the lock here. If a core wakes up for a rude request,
	 * it need not wait for another to finish its cluster selection and
	 * configuration process
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle);

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle))
		goto failed;

	cluster->stats->sleep_time = start_time;
	/* Propagate the cluster's combined vote upward */
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);

	if (!use_psci) {
		struct lpm_cluster_level *level = &cluster->levels[i];

		if (level->notify_rpm)
			if (sys_pm_ops && sys_pm_ops->update_wakeup)
				sys_pm_ops->update_wakeup(from_idle);
	}

	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}
+
/*
 * cluster_unprepare() - withdraw @cpu's vote on wakeup; the first CPU out
 * of a non-default level tears the level down, notifies exit, restores the
 * default device modes, and recurses into the parent.
 * @cluster:   cluster to unprepare (NULL terminates the recursion)
 * @cpu:       mask of CPUs withdrawing their vote
 * @child_idx: the level index the child had entered
 * @from_idle: true on the idle path
 * @end_time:  ns timestamp used to finalize sleep-time accounting
 */
static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i, ret;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	/* True only for the first CPU waking while all were in sync */
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	/* Only the first waker of a non-default level performs teardown */
	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);

	level = &cluster->levels[cluster->last_level];
	if (level->notify_rpm) {
		if (sys_pm_ops && sys_pm_ops->exit)
			sys_pm_ops->exit();

		/* If RPM bumps up CX to turbo, unvote CX turbo vote
		 * during exit of rpm assisted power collapse to
		 * reduce the power impact
		 */
		lpm_wa_cx_unvote_send();

		if (cluster->no_saw_devices && !use_psci)
			msm_spm_set_rpm_hs(false);

	}

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	/* Restore every device to the default level's mode */
	for (i = 0; i < cluster->ndevices; i++) {
		level = &cluster->levels[cluster->default_level];
		ret = set_device_mode(cluster, i, level);

		WARN_ON(ret);
	}

	cluster_notify(cluster, &cluster->levels[last_level], false);
	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}
+
+static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+ bool from_idle)
+{
+ struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+ bool jtag_save_restore =
+ cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+ /* Use broadcast timer for aggregating sleep mode within a cluster.
+ * A broadcast timer could be used in the following scenarios
+ * 1) The architected timer HW gets reset during certain low power
+ * modes and the core relies on a external(broadcast) timer to wake up
+ * from sleep. This information is passed through device tree.
+ * 2) The CPU low power mode could trigger a system low power mode.
+ * The low power module relies on Broadcast timer to aggregate the
+ * next wakeup within a cluster, in which case, CPU switches over to
+ * use broadcast timer.
+ */
+ if (from_idle && (cpu_level->use_bc_timer ||
+ (cpu_index >= cluster->min_child_level)))
+ tick_broadcast_enter();
+
+ if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ || (cpu_level->mode ==
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+ || (cpu_level->is_reset)))
+ cpu_pm_enter();
+
+ /*
+ * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+ */
+ if (jtag_save_restore)
+ msm_jtag_save_state();
+}
+
+static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+ bool from_idle)
+{
+ struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+ bool jtag_save_restore =
+ cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+ if (from_idle && (cpu_level->use_bc_timer ||
+ (cpu_index >= cluster->min_child_level)))
+ tick_broadcast_exit();
+
+ if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ || (cpu_level->mode ==
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+ || cpu_level->is_reset))
+ cpu_pm_exit();
+
+ /*
+ * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+ */
+ if (jtag_save_restore)
+ msm_jtag_restore_state();
+}
+
#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
/*
 * get_cluster_id() - build the PSCI composite state id by walking up the
 * cluster hierarchy, OR-ing each synced cluster's level id at its
 * affinity shift and counting affinity levels into @aff_lvl.
 *
 * Only clusters whose children are all in sync contribute; the walk stops
 * contributing (but still returns accumulated bits) otherwise.
 */
static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	/* Parent bits first, so each level lands at its own shift */
	state_id |= get_cluster_id(cluster->parent, aff_lvl);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id |= (level->psci_id & cluster->psci_mode_mask)
			<< cluster->psci_mode_shift;
		(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}
#endif
+
#if !defined(CONFIG_CPU_V7)
asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
/*
 * psci_enter_sleep() - AArch64 variant: enter the chosen level via PSCI
 * cpu_suspend (or plain WFI for level 0).
 *
 * Returns true when the targeted low power mode was actually entered.
 * NOTE(review): @from_idle is unused in all three variants — confirm
 * whether suspend/idle should differ here.
 */
static bool psci_enter_sleep(struct lpm_cluster *cluster,
	int idx, bool from_idle)

{
	bool ret;
	/*
	 * idx = 0 is the default LPM state
	 */
	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		ret = true;
	} else {
		int affinity_level = 0;
		int state_id = get_cluster_id(cluster, &affinity_level);
		int power_state =
			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
		bool success = false;

		/* Hypervisor-assisted PC: direct SMC (vendor function id),
		 * bypassing arm_cpuidle_suspend()
		 */
		if (cluster->cpu->levels[idx].hyp_psci) {
			stop_critical_timings();
			__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
			start_critical_timings();
			return 1;
		}

		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
		state_id |= (power_state | affinity_level
			| cluster->cpu->levels[idx].psci_id);

		update_debug_pc_event(CPU_ENTER, state_id,
						0xdeaffeed, 0xdeaffeed, true);
		stop_critical_timings();
		success = !arm_cpuidle_suspend(state_id);
		start_critical_timings();
		update_debug_pc_event(CPU_EXIT, state_id,
						success, 0xdeaffeed, true);
		ret = success;
	}
	return ret;
}
#elif defined(CONFIG_ARM_PSCI)
/*
 * ARMv7-with-PSCI variant: identical to the AArch64 path minus the
 * hypervisor SMC shortcut.
 */
static bool psci_enter_sleep(struct lpm_cluster *cluster,
	int idx, bool from_idle)
{
	bool ret;

	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		ret = true;
	} else {
		int affinity_level = 0;
		int state_id = get_cluster_id(cluster, &affinity_level);
		int power_state =
			PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
		bool success = false;

		affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
		state_id |= (power_state | affinity_level
			| cluster->cpu->levels[idx].psci_id);

		update_debug_pc_event(CPU_ENTER, state_id,
						0xdeaffeed, 0xdeaffeed, true);
		stop_critical_timings();
		success = !arm_cpuidle_suspend(state_id);
		start_critical_timings();
		update_debug_pc_event(CPU_EXIT, state_id,
						success, 0xdeaffeed, true);
		ret = success;
	}
	return ret;
}
#else
/* No PSCI support compiled in: always fail with a one-time warning */
static bool psci_enter_sleep(struct lpm_cluster *cluster,
	int idx, bool from_idle)
{
	WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
	return false;
}
#endif
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ int idx;
+
+ if (!cluster)
+ return 0;
+
+ idx = cpu_power_select(dev, cluster->cpu);
+
+ return idx;
+}
+
/*
 * lpm_cpuidle_enter() - cpuidle .enter hook: prepare CPU and cluster,
 * enter the level via SPM or PSCI, then unwind and account residency.
 *
 * Returns the entered level index, or -EINVAL for a negative @idx.
 * Interrupts are re-enabled before returning, per cpuidle convention.
 */
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
	bool success = true;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	ktime_t start = ktime_get();
	int64_t start_time = ktime_to_ns(ktime_get()), end_time;

	if (idx < 0)
		return -EINVAL;

	cpu_prepare(cluster, idx, true);
	cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	/* Work became runnable while preparing: skip the actual sleep */
	if (need_resched())
		goto exit;

	if (!use_psci) {
		if (idx > 0)
			update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
					0xdeaffeed, true);
		success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
				true);

		if (idx > 0)
			update_debug_pc_event(CPU_EXIT, idx, success,
							0xdeaffeed, true);
	} else {
		success = psci_enter_sleep(cluster, idx, true);
	}

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	/* Unwind in reverse order of prepare */
	cluster_unprepare(cluster, cpumask, idx, true, end_time);
	cpu_unprepare(cluster, idx, true);

	trace_cpu_idle_exit(idx, success);
	dev->last_residency = ktime_us_delta(ktime_get(), start);
	local_irq_enable();

	return idx;
}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+ struct cpumask *mask)
+{
+ struct cpuidle_device *device;
+ int cpu, ret;
+
+
+ if (!mask || !drv)
+ return -EINVAL;
+
+ drv->cpumask = mask;
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver %d\n", ret);
+ goto failed_driver_register;
+ }
+
+ for_each_cpu(cpu, mask) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ device->cpu = cpu;
+
+ ret = cpuidle_register_device(device);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver for cpu:%u\n",
+ cpu);
+ goto failed_driver_register;
+ }
+ }
+ return ret;
+failed_driver_register:
+ for_each_cpu(cpu, mask)
+ cpuidle_unregister_driver(drv);
+ return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+ struct cpumask *mask)
+{
+ return cpuidle_register(drv, NULL);
+}
+#endif
+
/*
 * Custom cpuidle governor: state selection is delegated to
 * cpu_power_select() via lpm_cpuidle_select(). Rating 30 so it wins over
 * lower-rated stock governors.
 */
static struct cpuidle_governor lpm_governor = {
	.name =		"qcom",
	.rating =	30,
	.select =	lpm_cpuidle_select,
	.owner =	THIS_MODULE,
};
+
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+ int i = 0, ret = 0;
+ unsigned int cpu;
+ struct lpm_cluster *p = NULL;
+
+ if (!cl->cpu) {
+ struct lpm_cluster *n;
+
+ list_for_each_entry(n, &cl->child, list) {
+ ret = cluster_cpuidle_register(n);
+ if (ret)
+ break;
+ }
+ return ret;
+ }
+
+ cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
+ if (!cl->drv)
+ return -ENOMEM;
+
+ cl->drv->name = "msm_idle";
+
+ for (i = 0; i < cl->cpu->nlevels; i++) {
+ struct cpuidle_state *st = &cl->drv->states[i];
+ struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+
+ snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+ snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
+ st->flags = 0;
+ st->exit_latency = cpu_level->pwr.latency_us;
+ st->power_usage = cpu_level->pwr.ss_power;
+ st->target_residency = 0;
+ st->enter = lpm_cpuidle_enter;
+ }
+
+ cl->drv->state_count = cl->cpu->nlevels;
+ cl->drv->safe_state_index = 0;
+ for_each_cpu(cpu, &cl->child_cpus)
+ per_cpu(cpu_cluster, cpu) = cl;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ p = per_cpu(cpu_cluster, cpu);
+ while (p) {
+ int j;
+
+ spin_lock(&p->sync_lock);
+ cpumask_set_cpu(cpu, &p->num_children_in_sync);
+ for (j = 0; j < p->nlevels; j++)
+ cpumask_copy(&p->levels[j].num_cpu_votes,
+ &p->num_children_in_sync);
+ spin_unlock(&p->sync_lock);
+ p = p->parent;
+ }
+ }
+ ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
+
+ if (ret) {
+ kfree(cl->drv);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/**
 * init_lpm - initializes the governor
 *
 * Registers the "qcom" cpuidle governor; runs at postcore so it exists
 * before the lpm-levels platform driver registers its cpuidle drivers.
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);
+
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+ struct lpm_cluster *parent)
+{
+ const char **level_name;
+ int i;
+
+ level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+ if (!level_name)
+ return;
+
+ for (i = 0; i < cpu->nlevels; i++)
+ level_name[i] = cpu->levels[i].name;
+
+ lpm_stats_config_level("cpu", level_name, cpu->nlevels,
+ parent->stats, &parent->child_cpus);
+
+ kfree(level_name);
+}
+
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+ struct lpm_cluster *parent)
+{
+ const char **level_name;
+ int i;
+ struct lpm_cluster *child;
+
+ if (!cl)
+ return;
+
+ level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+ if (!level_name)
+ return;
+
+ for (i = 0; i < cl->nlevels; i++)
+ level_name[i] = cl->levels[i].level_name;
+
+ cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+ cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+ kfree(level_name);
+
+ if (cl->cpu) {
+ register_cpu_lpm_stats(cl->cpu, cl);
+ return;
+ }
+
+ list_for_each_entry(child, &cl->child, list)
+ register_cluster_lpm_stats(child, cl);
+}
+
/*
 * lpm_suspend_prepare() - suspend_ops .prepare_late hook: flag suspend in
 * progress (read by cluster_select()) and start suspend stats accounting.
 */
static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}
+
/*
 * lpm_suspend_wake() - suspend_ops .wake hook: clear the in-progress flag
 * and close out suspend stats accounting.
 */
static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}
+
/*
 * lpm_suspend_enter() - suspend_ops .enter hook: put the last running CPU
 * into the deepest suspend-allowed level, taking its clusters down too.
 *
 * Always returns 0; a failure to find an allowed level only logs an error.
 */
static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	struct lpm_cpu *lpm_cpu = cluster->cpu;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;

	/* Deepest-first scan for a level permitted in suspend */
	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {

		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(cluster, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);
	if (idx > 0)
		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
					0xdeaffeed, false);

	/*
	 * Print the clocks which are enabled during system suspend
	 * This debug information is useful to know which are the
	 * clocks that are enabled and preventing the system level
	 * LPMs(XO and Vmin).
	 */
	clock_debug_print_enabled(true);

	/*
	 * NOTE(review): the PSCI path passes from_idle=true while the SPM
	 * path passes false; from_idle is unused by psci_enter_sleep() as
	 * written, but confirm the inconsistency is harmless.
	 */
	if (!use_psci)
		msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
	else
		psci_enter_sleep(cluster, idx, true);

	if (idx > 0)
		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
					false);

	cluster_unprepare(cluster, cpumask, idx, false, 0);
	cpu_unprepare(cluster, idx, false);
	return 0;
}
+
/*
 * lpm_dying_cpu() - CPU hotplug teardown callback: cast this CPU's vote
 * for the deepest possible child level (NR_LPM_LEVELS) so its cluster can
 * sleep while the CPU is offline.
 */
static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);

	update_debug_pc_event(CPU_HP_DYING, cpu,
				cluster->num_children_in_sync.bits[0],
				cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}
+
/*
 * lpm_starting_cpu() - CPU hotplug bringup callback: withdraw the vote
 * cast by lpm_dying_cpu() so the cluster tracks the CPU as active again.
 */
static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);

	update_debug_pc_event(CPU_HP_STARTING, cpu,
				cluster->num_children_in_sync.bits[0],
				cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}
+
/* System suspend callbacks; registered from lpm_probe() */
static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};
+
+static int lpm_probe(struct platform_device *pdev)
+{
+ int ret;
+ int size;
+ struct kobject *module_kobj = NULL;
+ struct md_region md_entry;
+
+ get_online_cpus();
+ lpm_root_node = lpm_of_parse_cluster(pdev);
+
+ if (IS_ERR_OR_NULL(lpm_root_node)) {
+ pr_err("%s(): Failed to probe low power modes\n", __func__);
+ put_online_cpus();
+ return PTR_ERR(lpm_root_node);
+ }
+
+ if (print_parsed_dt)
+ cluster_dt_walkthrough(lpm_root_node);
+
+ /*
+ * Register hotplug notifier before broadcast time to ensure there
+ * to prevent race where a broadcast timer might not be setup on for a
+ * core. BUG in existing code but no known issues possibly because of
+ * how late lpm_levels gets initialized.
+ */
+ suspend_set_ops(&lpm_suspend_ops);
+ hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+ if (ret) {
+ pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+ __func__, ret);
+ put_online_cpus();
+ return ret;
+ }
+ size = num_dbg_elements * sizeof(struct lpm_debug);
+ lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+ &lpm_debug_phys, GFP_KERNEL);
+ register_cluster_lpm_stats(lpm_root_node, NULL);
+
+ ret = cluster_cpuidle_register(lpm_root_node);
+ put_online_cpus();
+ if (ret) {
+ pr_err("%s()Failed to register with cpuidle framework\n",
+ __func__);
+ goto failed;
+ }
+ ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+ "AP_QCOM_SLEEP_STARTING",
+ lpm_starting_cpu, lpm_dying_cpu);
+ if (ret)
+ goto failed;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ ret = -ENOENT;
+ goto failed;
+ }
+
+ ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+ if (ret) {
+ pr_err("%s(): Failed to create cluster level nodes\n",
+ __func__);
+ goto failed;
+ }
+
+ /* Add lpm_debug to Minidump*/
+ strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)lpm_debug;
+ md_entry.phys_addr = lpm_debug_phys;
+ md_entry.size = size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add lpm_debug in Minidump\n");
+
+ return 0;
+failed:
+ free_cluster_node(lpm_root_node);
+ lpm_root_node = NULL;
+ return ret;
+}
+
/* DT match table: binds this driver to the "qcom,lpm-levels" node */
static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};
+
/* Platform driver; registered from lpm_levels_module_init() */
static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_mtch_tbl,
	},
};
+
+static int __init lpm_levels_module_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&lpm_driver);
+ if (rc) {
+ pr_info("Error registering %s\n", lpm_driver.driver.name);
+ goto fail;
+ }
+
+fail:
+ return rc;
+}
+late_initcall(lpm_levels_module_init);
+
/*
 * lpm_cpu_pre_pc_cb() - report to msm-pm the L2 flag TZ should use for
 * this CPU's power collapse, and take the SCM handoff lock.
 * @cpu: CPU about to power collapse
 *
 * Returns the L2 flag: MSM_SCM_L2_OFF before probe or when the cluster
 * has no lpm_dev (forces a cache flush), otherwise the cluster device's
 * recorded tz_flag — but only when all children are in sync; else L2_ON.
 */
enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;

	/*
	 * No need to acquire the lock if probe isn't completed yet
	 * In the event of the hotplug happening before lpm probe, we want to
	 * flush the cache to make sure that L2 is flushed. In particular, this
	 * could cause incoherencies for a cluster architecture. This wouldn't
	 * affect the idle case as the idle driver wouldn't be registered
	 * before the probe function
	 */
	if (!cluster)
		return MSM_SCM_L2_OFF;

	/*
	 * Assumes L2 only. What/How parameters gets passed into TZ will
	 * determine how this function reports this info back in msm-pm.c
	 */
	spin_lock(&cluster->sync_lock);

	if (!cluster->lpm_dev) {
		retflag = MSM_SCM_L2_OFF;
		goto unlock_and_return;
	}

	if (!cpumask_equal(&cluster->num_children_in_sync,
						&cluster->child_cpus))
		goto unlock_and_return;

	if (cluster->lpm_dev)
		retflag = cluster->lpm_dev->tz_flag;
	/*
	 * The scm_handoff_lock will be release by the secure monitor.
	 * It is used to serialize power-collapses from this point on,
	 * so that both Linux and the secure context have a consistent
	 * view regarding the number of running cpus (cpu_count).
	 *
	 * It must be acquired before releasing the cluster lock.
	 */
unlock_and_return:
	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
			0xdeadbeef);
	trace_pre_pc_cb(retflag);
	remote_spin_lock_rlock_id(&scm_handoff_lock,
				  REMOTE_SPINLOCK_TID_START + cpu);
	spin_unlock(&cluster->sync_lock);
	return retflag;
}
+
/**
 * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
 *
 * @cpu: cpuid of the dying CPU
 *
 * Called from platform_cpu_kill() to terminate hotplug in a low power mode
 *
 * Before probe: falls back to the deepest SPM mode the hardware reports
 * available. After probe: picks the lowest-steady-state-power CPU level
 * and votes the cluster down via cluster_prepare().
 */
void lpm_cpu_hotplug_enter(unsigned int cpu)
{
	enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
	int i;
	int idx = -1;

	/*
	 * If lpm isn't probed yet, try to put cpu into the one of the modes
	 * available
	 */
	if (!cluster) {
		if (msm_spm_is_mode_avail(
					MSM_SPM_MODE_POWER_COLLAPSE)){
			mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
		} else if (msm_spm_is_mode_avail(
				MSM_SPM_MODE_FASTPC)) {
			mode = MSM_PM_SLEEP_MODE_FASTPC;
		} else if (msm_spm_is_mode_avail(
				MSM_SPM_MODE_RETENTION)) {
			mode = MSM_PM_SLEEP_MODE_RETENTION;
		} else {
			pr_err("No mode avail for cpu%d hotplug\n", cpu);
			WARN_ON(1);
			return;
		}
	} else {
		struct lpm_cpu *lpm_cpu;
		uint32_t ss_pwr = ~0U;

		/* Choose the level with the minimum steady-state power */
		lpm_cpu = cluster->cpu;
		for (i = 0; i < lpm_cpu->nlevels; i++) {
			if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
				continue;
			ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
			idx = i;
			mode = lpm_cpu->levels[i].mode;
		}

		if (mode == MSM_PM_SLEEP_MODE_NR)
			return;

		WARN_ON(idx < 0);
		cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
	}

	msm_cpu_pm_enter_sleep(mode, false);
}
diff --git a/drivers/cpuidle/lpm-levels-legacy.h b/drivers/cpuidle/lpm-levels-legacy.h
new file mode 100644
index 0000000..4a07355
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/spm.h>
+
+#define NR_LPM_LEVELS 8
+
+extern bool use_psci;
+
/* Maps a DT mode-name string to its numeric SPM/PM mode value */
struct lpm_lookup_table {
	uint32_t modes;
	const char *mode_name;
};
+
/* Power/latency characteristics of one LPM level, parsed from DT */
struct power_params {
	uint32_t latency_us;		/* Enter + Exit latency */
	uint32_t ss_power;		/* Steady state power */
	uint32_t energy_overhead;	/* Enter + exit over head */
	uint32_t time_overhead_us;	/* Enter + exit overhead */
	uint32_t residencies[NR_LPM_LEVELS];	/* Break-even vs deeper levels */
	uint32_t max_residency;		/* Largest of the residencies */
};
+
/* One CPU idle level (cpuidle state) and its properties */
struct lpm_cpu_level {
	const char *name;		/* DT level name, shown in sysfs desc */
	enum msm_pm_sleep_mode mode;	/* SPM/PM sleep mode to enter */
	bool use_bc_timer;		/* Needs broadcast timer for wakeup */
	struct power_params pwr;
	unsigned int psci_id;		/* Level bits for the PSCI state id */
	bool is_reset;			/* Core context is lost in this level */
	bool jtag_save_restore;		/* Save/restore JTAG regs around entry */
	bool hyp_psci;			/* Enter via hypervisor SMC shortcut */
	int reset_level;
};
+
/* Per-CPU LPM level table hanging off a leaf cluster */
struct lpm_cpu {
	struct lpm_cpu_level levels[NR_LPM_LEVELS];
	int nlevels;			/* Number of valid entries in levels[] */
	unsigned int psci_mode_shift;	/* Bit position of CPU level in state id */
	unsigned int psci_mode_mask;
	struct lpm_cluster *parent;	/* Leaf cluster owning these CPUs */
};
+
/* Sysfs-controlled availability switches for one LPM level */
struct lpm_level_avail {
	bool idle_enabled;		/* Level usable from cpuidle */
	bool suspend_enabled;		/* Level usable from system suspend */
	struct kobject *kobj;
	struct kobj_attribute idle_enabled_attr;
	struct kobj_attribute suspend_enabled_attr;
	void *data;			/* Back-pointer for sysfs callbacks */
	int idx;			/* Level index within its owner */
	bool cpu_node;			/* True when owned by a CPU level */
};
+
/* One cluster low power level and its entry requirements */
struct lpm_cluster_level {
	const char *level_name;
	int *mode;			/* SPM mode to enter */
	int min_child_level;		/* Shallowest child level that permits this */
	struct cpumask num_cpu_votes;	/* CPUs that have voted for this level */
	struct power_params pwr;
	bool notify_rpm;		/* RPM must be told on enter/exit */
	bool disable_dynamic_routing;	/* Don't route RPM wakeup to next CPU */
	bool sync_level;
	bool last_core_only;		/* Valid only with one online core */
	struct lpm_level_avail available;
	unsigned int psci_id;		/* Level bits for the PSCI state id */
	bool is_reset;			/* Cluster context is lost */
	int reset_level;
	bool no_cache_flush;		/* L2 retained (GDHS) instead of flushed */
};
+
/* Binding of an SPM device to its mode-setting hook and TZ flag state */
struct low_power_ops {
	struct msm_spm_device *spm;
	/* One of set_l2_mode/set_l3_mode/set_system_mode */
	int (*set_mode)(struct low_power_ops *ops, int mode,
				struct lpm_cluster_level *level);
	enum msm_pm_l2_scm_flag tz_flag;	/* Reported via lpm_cpu_pre_pc_cb() */
};
+
/* Node in the LPM hierarchy parsed from DT; leaf nodes carry a lpm_cpu */
struct lpm_cluster {
	struct list_head list;		/* Link in parent's child list */
	struct list_head child;		/* Child clusters (empty for leaves) */
	const char *cluster_name;
	const char **name;
	unsigned long aff_level; /* Affinity level of the node */
	struct low_power_ops *lpm_dev;	/* Per-device SPM ops (ndevices long) */
	int ndevices;
	struct lpm_cluster_level levels[NR_LPM_LEVELS];
	int nlevels;
	enum msm_pm_l2_scm_flag l2_flag;
	int min_child_level;
	int default_level;		/* Level meaning "cluster active" */
	int last_level;			/* Level currently entered */
	struct lpm_cpu *cpu;		/* Non-NULL for leaf clusters */
	struct cpuidle_driver *drv;
	spinlock_t sync_lock;		/* Guards the sync/vote masks below */
	struct cpumask child_cpus;	/* All CPUs under this cluster */
	struct cpumask num_children_in_sync;	/* Children currently voting */
	struct lpm_cluster *parent;
	struct lpm_stats *stats;
	unsigned int psci_mode_shift;	/* Bit position in the PSCI state id */
	unsigned int psci_mode_mask;
	bool no_saw_devices;		/* No SAW: RPM handshake done via msm_spm_set_rpm_hs */
};
+
/* SPM mode-setting hooks installed as low_power_ops.set_mode */
int set_l2_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level);
int set_system_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level);
int set_l3_mode(struct low_power_ops *ops, int mode,
		struct lpm_cluster_level *level);
void lpm_suspend_wake_time(uint64_t wakeup_time);

/* DT parsing and debug dump of the cluster hierarchy */
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
void free_cluster_node(struct lpm_cluster *cluster);
void cluster_dt_walkthrough(struct lpm_cluster *cluster);

/* Sysfs availability nodes and mode gating queries */
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle);
uint32_t *get_per_cpu_max_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(bool, pending_ipi);
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ if per_cpu(pending_ipi, cpu)
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+ return false;
+}
+#endif
diff --git a/drivers/cpuidle/lpm-levels-of-legacy.c b/drivers/cpuidle/lpm-levels-of-legacy.c
new file mode 100644
index 0000000..bf74124
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-of-legacy.c
@@ -0,0 +1,1006 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels-legacy.h"
+
+bool use_psci;
+enum lpm_type {
+ IDLE = 0,
+ SUSPEND,
+ LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+ enum lpm_type type;
+ char *str;
+};
+
+static const struct lpm_type_str lpm_types[] = {
+ {IDLE, "idle_enabled"},
+ {SUSPEND, "suspend_enabled"},
+};
+
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+ struct lpm_level_avail *avail)
+{
+ void *arg = NULL;
+
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+ arg = (void *) &avail->idle_enabled;
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+ arg = (void *) &avail->suspend_enabled;
+
+ return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+ struct kobj_attribute *attr)
+{
+ struct lpm_level_avail *avail = NULL;
+
+ if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+ avail = container_of(attr, struct lpm_level_avail,
+ idle_enabled_attr);
+ else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+ avail = container_of(attr, struct lpm_level_avail,
+ suspend_enabled_attr);
+
+ return avail;
+}
+
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+ uint32_t *residency = per_cpu(max_residency, cpu_id);
+
+ for (i = 0; i < cpu->nlevels; i++) {
+ struct power_params *pwr = &cpu->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, i, true);
+
+ if (!mode_avail) {
+ residency[i] = 0;
+ continue;
+ }
+
+ residency[i] = ~0;
+ for (j = i + 1; j < cpu->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cpu_mode_allow(cpu_id, j, true);
+
+ if (mode_avail &&
+ (residency[i] > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ residency[i] = pwr->residencies[j];
+ }
+ }
+}
+
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+ bool probe_time)
+{
+ int i, j;
+ bool mode_avail;
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct power_params *pwr = &cluster->levels[i].pwr;
+
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, i,
+ true);
+
+ if (!mode_avail) {
+ pwr->max_residency = 0;
+ continue;
+ }
+
+ pwr->max_residency = ~0;
+ for (j = i+1; j < cluster->nlevels; j++) {
+ mode_avail = probe_time ||
+ lpm_cluster_mode_allow(cluster, j,
+ true);
+ if (mode_avail &&
+ (pwr->max_residency > pwr->residencies[j]) &&
+ (pwr->residencies[j] != 0))
+ pwr->max_residency = pwr->residencies[j];
+ }
+ }
+}
+
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+ return per_cpu(max_residency, cpu);
+}
+
+static ssize_t lpm_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret = 0;
+ struct kernel_param kp;
+
+ kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
+ ret = param_get_bool(buf, &kp);
+ if (ret > 0) {
+ strlcat(buf, "\n", PAGE_SIZE);
+ ret++;
+ }
+
+ return ret;
+}
+
+static ssize_t lpm_enable_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ int ret = 0;
+ struct kernel_param kp;
+ struct lpm_level_avail *avail;
+
+ avail = get_avail_ptr(kobj, attr);
+ if (WARN_ON(!avail))
+ return -EINVAL;
+
+ kp.arg = get_enabled_ptr(attr, avail);
+ ret = param_set_bool(buf, &kp);
+
+ if (avail->cpu_node)
+ set_optimum_cpu_residency(avail->data, avail->idx, false);
+ else
+ set_optimum_cluster_residency(avail->data, false);
+
+ return ret ? ret : len;
+}
+
+static int create_lvl_avail_nodes(const char *name,
+ struct kobject *parent, struct lpm_level_avail *avail,
+ void *data, int index, bool cpu_node)
+{
+ struct attribute_group *attr_group = NULL;
+ struct attribute **attr = NULL;
+ struct kobject *kobj = NULL;
+ int ret = 0;
+
+ kobj = kobject_create_and_add(name, parent);
+ if (!kobj)
+ return -ENOMEM;
+
+ attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+ GFP_KERNEL);
+ if (!attr_group) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ attr = devm_kzalloc(&lpm_pdev->dev,
+ sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+ if (!attr) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ sysfs_attr_init(&avail->idle_enabled_attr.attr);
+ avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+ avail->idle_enabled_attr.attr.mode = 0644;
+ avail->idle_enabled_attr.show = lpm_enable_show;
+ avail->idle_enabled_attr.store = lpm_enable_store;
+
+ sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+ avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+ avail->suspend_enabled_attr.attr.mode = 0644;
+ avail->suspend_enabled_attr.show = lpm_enable_show;
+ avail->suspend_enabled_attr.store = lpm_enable_store;
+
+ attr[0] = &avail->idle_enabled_attr.attr;
+ attr[1] = &avail->suspend_enabled_attr.attr;
+ attr[2] = NULL;
+ attr_group->attrs = attr;
+
+ ret = sysfs_create_group(kobj, attr_group);
+ if (ret) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ avail->idle_enabled = true;
+ avail->suspend_enabled = true;
+ avail->kobj = kobj;
+ avail->data = data;
+ avail->idx = index;
+ avail->cpu_node = cpu_node;
+
+ return ret;
+
+failed:
+ kobject_put(kobj);
+ return ret;
+}
+
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+ int cpu;
+ int i, cpu_idx;
+ struct kobject **cpu_kobj = NULL;
+ struct lpm_level_avail *level_list = NULL;
+ char cpu_name[20] = {0};
+ int ret = 0;
+
+ cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+ cpumask_weight(&p->child_cpus), GFP_KERNEL);
+ if (!cpu_kobj)
+ return -ENOMEM;
+
+ cpu_idx = 0;
+ for_each_cpu(cpu, &p->child_cpus) {
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+ cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
+ if (!cpu_kobj[cpu_idx]) {
+ ret = -ENOMEM;
+ goto release_kobj;
+ }
+
+ level_list = devm_kzalloc(&lpm_pdev->dev,
+ p->cpu->nlevels * sizeof(*level_list),
+ GFP_KERNEL);
+ if (!level_list) {
+ ret = -ENOMEM;
+ goto release_kobj;
+ }
+
+ for (i = 0; i < p->cpu->nlevels; i++) {
+
+ ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
+ cpu_kobj[cpu_idx], &level_list[i],
+ (void *)p->cpu, cpu, true);
+ if (ret)
+ goto release_kobj;
+ }
+
+ cpu_level_available[cpu] = level_list;
+ cpu_idx++;
+ }
+
+ return ret;
+
+release_kobj:
+ for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+ kobject_put(cpu_kobj[i]);
+
+ return ret;
+}
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+ int ret = 0;
+ struct lpm_cluster *child = NULL;
+ int i;
+ struct kobject *cluster_kobj = NULL;
+
+ if (!p)
+ return -ENODEV;
+
+ cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+ if (!cluster_kobj)
+ return -ENOMEM;
+
+ for (i = 0; i < p->nlevels; i++) {
+ ret = create_lvl_avail_nodes(p->levels[i].level_name,
+ cluster_kobj, &p->levels[i].available,
+ (void *)p, 0, false);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(child, &p->child, list) {
+ ret = create_cluster_lvl_nodes(child, cluster_kobj);
+ if (ret)
+ return ret;
+ }
+
+ if (p->cpu) {
+ ret = create_cpu_lvl_nodes(p, cluster_kobj);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool lpm_cpu_mode_allow(unsigned int cpu,
+ unsigned int index, bool from_idle)
+{
+ struct lpm_level_avail *avail = cpu_level_available[cpu];
+
+ if (!lpm_pdev || !avail)
+ return !from_idle;
+
+ return !!(from_idle ? avail[index].idle_enabled :
+ avail[index].suspend_enabled);
+}
+
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+ unsigned int mode, bool from_idle)
+{
+ struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+ if (!lpm_pdev || !avail)
+ return false;
+
+ return !!(from_idle ? avail->idle_enabled :
+ avail->suspend_enabled);
+}
+
+static int parse_legacy_cluster_params(struct device_node *node,
+ struct lpm_cluster *c)
+{
+ int i;
+ char *key;
+ int ret;
+ struct lpm_match {
+ char *devname;
+ int (*set_mode)(struct low_power_ops *, int,
+ struct lpm_cluster_level *);
+ };
+ struct lpm_match match_tbl[] = {
+ {"l2", set_l2_mode},
+ {"cci", set_system_mode},
+ {"l3", set_l3_mode},
+ {"cbf", set_system_mode},
+ };
+
+
+ key = "qcom,spm-device-names";
+ c->ndevices = of_property_count_strings(node, key);
+
+ if (c->ndevices < 0) {
+ pr_info("%s(): Ignoring cluster params\n", __func__);
+ c->no_saw_devices = true;
+ c->ndevices = 0;
+ return 0;
+ }
+
+ c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
+ GFP_KERNEL);
+ c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
+ c->ndevices * sizeof(*c->lpm_dev),
+ GFP_KERNEL);
+ if (!c->name || !c->lpm_dev) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ for (i = 0; i < c->ndevices; i++) {
+ char device_name[20];
+ int j;
+
+ ret = of_property_read_string_index(node, key, i, &c->name[i]);
+ if (ret)
+ goto failed;
+ snprintf(device_name, sizeof(device_name), "%s-%s",
+ c->cluster_name, c->name[i]);
+
+ c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
+
+		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
+			pr_err("Failed to get spm device by name:%s\n",
+					device_name);
+			/* PTR_ERR(NULL) is 0; map NULL to a real error code */
+			ret = c->lpm_dev[i].spm ? PTR_ERR(c->lpm_dev[i].spm) : -ENODEV;
+			goto failed;
+		}
+ for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
+ if (!strcmp(c->name[i], match_tbl[j].devname))
+ c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
+ }
+
+ if (!c->lpm_dev[i].set_mode) {
+ ret = -ENODEV;
+ goto failed;
+ }
+ }
+
+ key = "qcom,default-level";
+ if (of_property_read_u32(node, key, &c->default_level))
+ c->default_level = 0;
+ return 0;
+failed:
+ pr_err("%s(): Failed reading %s\n", __func__, key);
+ return ret;
+}
+
+static int parse_cluster_params(struct device_node *node,
+ struct lpm_cluster *c)
+{
+ char *key;
+ int ret;
+
+ key = "label";
+ ret = of_property_read_string(node, key, &c->cluster_name);
+ if (ret) {
+ pr_err("%s(): Cannot read required param %s\n", __func__, key);
+ return ret;
+ }
+
+ if (use_psci) {
+ key = "qcom,psci-mode-shift";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_shift);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
+
+ key = "qcom,psci-mode-mask";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_mask);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
+
+ /* Set ndevice to 1 as default */
+ c->ndevices = 1;
+
+ return 0;
+ } else
+ return parse_legacy_cluster_params(node, c);
+}
+
+static int parse_lpm_mode(const char *str)
+{
+ int i;
+ struct lpm_lookup_table mode_lookup[] = {
+ {MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
+ {MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
+ {MSM_SPM_MODE_FASTPC, "fpc"},
+ {MSM_SPM_MODE_GDHS, "gdhs"},
+ {MSM_SPM_MODE_RETENTION, "retention"},
+ {MSM_SPM_MODE_CLOCK_GATING, "wfi"},
+ {MSM_SPM_MODE_DISABLED, "active"}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
+ if (!strcmp(str, mode_lookup[i].mode_name))
+ return mode_lookup[i].modes;
+ return -EINVAL;
+}
+
+static int parse_power_params(struct device_node *node,
+ struct power_params *pwr)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,latency-us";
+ ret = of_property_read_u32(node, key, &pwr->latency_us);
+ if (ret)
+ goto fail;
+
+ key = "qcom,ss-power";
+ ret = of_property_read_u32(node, key, &pwr->ss_power);
+ if (ret)
+ goto fail;
+
+ key = "qcom,energy-overhead";
+ ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+ if (ret)
+ goto fail;
+
+ key = "qcom,time-overhead";
+ ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+ if (ret)
+ goto fail;
+
+fail:
+ if (ret)
+ pr_err("%s(): %s Error reading %s\n", __func__, node->name,
+ key);
+ return ret;
+}
+
+static int parse_cluster_level(struct device_node *node,
+ struct lpm_cluster *cluster)
+{
+ int i = 0;
+ struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+ int ret = -ENOMEM;
+ char *key;
+
+ key = "label";
+ ret = of_property_read_string(node, key, &level->level_name);
+ if (ret)
+ goto failed;
+
+ if (use_psci) {
+ char *k = "qcom,psci-mode";
+
+ ret = of_property_read_u32(node, k, &level->psci_id);
+ if (ret)
+ goto failed;
+
+ level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+ } else if (!cluster->no_saw_devices) {
+ key = "no saw-devices";
+
+ level->mode = devm_kzalloc(&lpm_pdev->dev,
+ cluster->ndevices * sizeof(*level->mode),
+ GFP_KERNEL);
+ if (!level->mode) {
+ pr_err("Memory allocation failed\n");
+ goto failed;
+ }
+
+ for (i = 0; i < cluster->ndevices; i++) {
+ const char *spm_mode;
+ char key[25] = {0};
+
+ snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
+ ret = of_property_read_string(node, key, &spm_mode);
+ if (ret)
+ goto failed;
+
+			level->mode[i] = parse_lpm_mode(spm_mode);
+			/* parse_lpm_mode() < 0 must not return success (ret==0) */
+			ret = level->mode[i] < 0 ? -EINVAL : 0;
+			if (ret)
+				goto failed;
+
+ if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
+ || level->mode[i] ==
+ MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
+ level->is_reset |= true;
+ }
+ }
+
+ key = "label";
+ ret = of_property_read_string(node, key, &level->level_name);
+ if (ret)
+ goto failed;
+
+ if (cluster->nlevels != cluster->default_level) {
+ key = "min child idx";
+ ret = of_property_read_u32(node, "qcom,min-child-idx",
+ &level->min_child_level);
+ if (ret)
+ goto failed;
+
+ if (cluster->min_child_level > level->min_child_level)
+ cluster->min_child_level = level->min_child_level;
+ }
+
+ level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+ level->disable_dynamic_routing = of_property_read_bool(node,
+ "qcom,disable-dynamic-int-routing");
+ level->last_core_only = of_property_read_bool(node,
+ "qcom,last-core-only");
+ level->no_cache_flush = of_property_read_bool(node,
+ "qcom,no-cache-flush");
+
+ key = "parse_power_params";
+ ret = parse_power_params(node, &level->pwr);
+ if (ret)
+ goto failed;
+
+ key = "qcom,reset-level";
+ ret = of_property_read_u32(node, key, &level->reset_level);
+ if (ret == -EINVAL)
+ level->reset_level = LPM_RESET_LVL_NONE;
+ else if (ret)
+ goto failed;
+
+ cluster->nlevels++;
+ return 0;
+failed:
+ pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
+ return ret;
+}
+
+static int parse_cpu_spm_mode(const char *mode_name)
+{
+ struct lpm_lookup_table pm_sm_lookup[] = {
+ {MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ "wfi"},
+ {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ "standalone_pc"},
+ {MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ "pc"},
+ {MSM_PM_SLEEP_MODE_RETENTION,
+ "retention"},
+ {MSM_PM_SLEEP_MODE_FASTPC,
+ "fpc"},
+ };
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+ if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+ ret = pm_sm_lookup[i].modes;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,spm-cpu-mode";
+ ret = of_property_read_string(n, key, &l->name);
+ if (ret) {
+ pr_err("Failed %s %d\n", n->name, __LINE__);
+ return ret;
+ }
+
+ if (use_psci) {
+ key = "qcom,psci-cpu-mode";
+
+ ret = of_property_read_u32(n, key, &l->psci_id);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ n->name);
+ return ret;
+ }
+ key = "qcom,hyp-psci";
+
+ l->hyp_psci = of_property_read_bool(n, key);
+ } else {
+ l->mode = parse_cpu_spm_mode(l->name);
+
+ if (l->mode < 0)
+ return l->mode;
+ }
+ return 0;
+
+}
+
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+ struct device_node *cpu_node;
+ int cpu;
+ int idx = 0;
+
+ cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+ if (!cpu_node) {
+ pr_info("%s: No CPU phandle, assuming single cluster\n",
+ node->full_name);
+ /*
+ * Not all targets have the cpu node populated in the device
+ * tree. If cpu node is not populated assume all possible
+ * nodes belong to this cluster
+ */
+ cpumask_copy(mask, cpu_possible_mask);
+ return 0;
+ }
+
+ while (cpu_node) {
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ cpumask_set_cpu(cpu, mask);
+ break;
+ }
+ }
+ of_node_put(cpu_node);
+ cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+ }
+
+ return 0;
+}
+
+static int calculate_residency(struct power_params *base_pwr,
+ struct power_params *next_pwr)
+{
+ int32_t residency = (int32_t)(next_pwr->energy_overhead -
+ base_pwr->energy_overhead) -
+ ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+ - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+ residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+ if (residency < 0) {
+ pr_err("%s: residency < 0 for LPM\n",
+ __func__);
+ return next_pwr->time_overhead_us;
+ }
+
+ return residency < next_pwr->time_overhead_us ?
+ next_pwr->time_overhead_us : residency;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+ struct device_node *n;
+ int ret = -ENOMEM;
+ int i, j;
+ char *key;
+
+ c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
+ if (!c->cpu)
+ return ret;
+
+ c->cpu->parent = c;
+ if (use_psci) {
+
+ key = "qcom,psci-mode-shift";
+
+ ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
+ }
+ key = "qcom,psci-mode-mask";
+
+ ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
+ }
+ }
+ for_each_child_of_node(node, n) {
+ struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+
+ c->cpu->nlevels++;
+
+ ret = parse_cpu_mode(n, l);
+ if (ret < 0) {
+ pr_info("Failed %s\n", l->name);
+ goto failed;
+ }
+
+ ret = parse_power_params(n, &l->pwr);
+ if (ret)
+ goto failed;
+
+ key = "qcom,use-broadcast-timer";
+ l->use_bc_timer = of_property_read_bool(n, key);
+
+ l->is_reset = of_property_read_bool(n, "qcom,is-reset");
+
+ key = "qcom,jtag-save-restore";
+ l->jtag_save_restore = of_property_read_bool(n, key);
+
+ key = "qcom,reset-level";
+ ret = of_property_read_u32(n, key, &l->reset_level);
+ if (ret == -EINVAL)
+ l->reset_level = LPM_RESET_LVL_NONE;
+ else if (ret)
+ goto failed;
+ of_node_put(n);
+ }
+ for (i = 0; i < c->cpu->nlevels; i++) {
+ for (j = 0; j < c->cpu->nlevels; j++) {
+ if (i >= j) {
+ c->cpu->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+
+ c->cpu->levels[i].pwr.residencies[j] =
+ calculate_residency(&c->cpu->levels[i].pwr,
+ &c->cpu->levels[j].pwr);
+
+ pr_err("%s: idx %d %u\n", __func__, j,
+ c->cpu->levels[i].pwr.residencies[j]);
+ }
+ }
+
+ return 0;
+failed:
+ of_node_put(n);
+ pr_err("%s(): Failed with error code:%d\n", __func__, ret);
+ return ret;
+}
+
+/* Recursively unlink and tear down all child clusters of @cluster. */
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+	struct lpm_cluster *cl, *m;
+
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
+	}
+
+	cluster->ndevices = 0;
+}
+
+/*
+ * TODO:
+ * Expects a CPU or a cluster only. This ensures that affinity
+ * level of a cluster is consistent with reference to its
+ * child nodes.
+ */
+static struct lpm_cluster *parse_cluster(struct device_node *node,
+ struct lpm_cluster *parent)
+{
+ struct lpm_cluster *c;
+ struct device_node *n;
+ char *key;
+ int ret = 0;
+ int i, j;
+
+ c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ ret = parse_cluster_params(node, c);
+
+ if (ret)
+ goto failed_parse_params;
+
+ INIT_LIST_HEAD(&c->child);
+ c->parent = parent;
+ spin_lock_init(&c->sync_lock);
+ c->min_child_level = NR_LPM_LEVELS;
+
+ for_each_child_of_node(node, n) {
+
+ if (!n->name)
+ continue;
+ key = "qcom,pm-cluster-level";
+ if (!of_node_cmp(n->name, key)) {
+ if (parse_cluster_level(n, c))
+ goto failed_parse_cluster;
+ continue;
+ }
+
+ key = "qcom,pm-cluster";
+ if (!of_node_cmp(n->name, key)) {
+ struct lpm_cluster *child;
+
+ if (c->no_saw_devices)
+ pr_info("%s: SAW device not provided.\n",
+ __func__);
+
+			child = parse_cluster(n, c);
+			/* parse_cluster() may return NULL or ERR_PTR; check both */
+			if (IS_ERR_OR_NULL(child))
+				goto failed_parse_cluster;
+
+ of_node_put(n);
+ list_add(&child->list, &c->child);
+ cpumask_or(&c->child_cpus, &c->child_cpus,
+ &child->child_cpus);
+ c->aff_level = child->aff_level + 1;
+ continue;
+ }
+
+ key = "qcom,pm-cpu";
+ if (!of_node_cmp(n->name, key)) {
+ /*
+ * Parse the the cpu node only if a pm-cpu node
+ * is available, though the mask is defined @ the
+ * cluster level
+ */
+ if (get_cpumask_for_node(node, &c->child_cpus))
+ goto failed_parse_cluster;
+
+ if (parse_cpu_levels(n, c))
+ goto failed_parse_cluster;
+
+ c->aff_level = 1;
+
+ for_each_cpu(i, &c->child_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return ERR_PTR(-ENOMEM);
+ set_optimum_cpu_residency(c->cpu, i, true);
+ }
+ }
+ }
+
+ if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+ c->last_level = c->default_level;
+ else
+ c->last_level = c->nlevels-1;
+
+ for (i = 0; i < c->nlevels; i++) {
+ for (j = 0; j < c->nlevels; j++) {
+ if (i >= j) {
+ c->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+ c->levels[i].pwr.residencies[j] = calculate_residency(
+ &c->levels[i].pwr, &c->levels[j].pwr);
+ }
+ }
+ set_optimum_cluster_residency(c, true);
+ return c;
+
+failed_parse_cluster:
+ pr_err("Failed parse cluster:%s\n", key);
+ if (parent)
+ list_del(&c->list);
+ free_cluster_node(c);
+failed_parse_params:
+ pr_err("Failed parse params\n");
+ return NULL;
+}
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+ struct device_node *top = NULL;
+ struct lpm_cluster *c;
+
+ use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
+
+ top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+ if (!top) {
+ pr_err("Failed to find root node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ lpm_pdev = pdev;
+ c = parse_cluster(top, NULL);
+ of_node_put(top);
+ return c;
+}
+
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+ int i, j;
+ static int id;
+ char str[10] = {0};
+
+ if (!cluster)
+ return;
+
+ for (i = 0; i < id; i++)
+ snprintf(str+i, 10 - i, "\t");
+ pr_info("%d\n", __LINE__);
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct lpm_cluster_level *l = &cluster->levels[i];
+
+ pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
+ for (j = 0; j < cluster->ndevices; j++)
+ pr_info("%sDevice: %pk id:%pk\n", str,
+ &cluster->name[j], &l->mode[i]);
+ }
+
+ if (cluster->cpu) {
+ pr_info("%d\n", __LINE__);
+ for (j = 0; j < cluster->cpu->nlevels; j++)
+ pr_info("%s\tCPU mode: %s id:%d\n", str,
+ cluster->cpu->levels[j].name,
+ cluster->cpu->levels[j].mode);
+ }
+
+ id++;
+
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ pr_info("%d\n", __LINE__);
+ n = list_entry(list, typeof(*n), list);
+ cluster_dt_walkthrough(n);
+ }
+ id--;
+}
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 7a653c6..1d1d7e7 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -614,6 +614,7 @@
break;
}
}
+ of_node_put(cpu_node);
cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
}
@@ -651,13 +652,16 @@
cpu->nlevels++;
ret = parse_cpu_mode(n, l);
- if (ret)
+ if (ret) {
+ of_node_put(n);
return ret;
+ }
ret = parse_power_params(n, &l->pwr);
- if (ret)
+ if (ret) {
+ of_node_put(n);
return ret;
-
+ }
key = "qcom,use-broadcast-timer";
l->use_bc_timer = of_property_read_bool(n, key);
@@ -670,6 +674,7 @@
l->reset_level = LPM_RESET_LVL_NONE;
else if (ret)
return ret;
+ of_node_put(n);
}
for (i = 0; i < cpu->nlevels; i++) {
@@ -820,8 +825,11 @@
key = "qcom,pm-cluster-level";
if (!of_node_cmp(n->name, key)) {
- if (parse_cluster_level(n, c))
+ if (parse_cluster_level(n, c)) {
+ of_node_put(n);
goto failed_parse_cluster;
+ }
+ of_node_put(n);
continue;
}
@@ -830,22 +838,28 @@
struct lpm_cluster *child;
child = parse_cluster(n, c);
- if (!child)
+ if (!child) {
+ of_node_put(n);
goto failed_parse_cluster;
+ }
list_add(&child->list, &c->child);
cpumask_or(&c->child_cpus, &c->child_cpus,
&child->child_cpus);
c->aff_level = child->aff_level + 1;
+ of_node_put(n);
continue;
}
key = "qcom,pm-cpu";
if (!of_node_cmp(n->name, key)) {
- if (parse_cpu_levels(n, c))
+ if (parse_cpu_levels(n, c)) {
+ of_node_put(n);
goto failed_parse_cluster;
+ }
c->aff_level = 1;
+ of_node_put(n);
}
}
@@ -879,6 +893,7 @@
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
struct device_node *top = NULL;
+ struct lpm_cluster *c;
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top) {
@@ -887,7 +902,9 @@
}
lpm_pdev = pdev;
- return parse_cluster(top, NULL);
+ c = parse_cluster(top, NULL);
+ of_node_put(top);
+ return c;
}
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 9694225..463589a 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -341,6 +341,11 @@
{
unsigned int cpu = raw_smp_processor_id();
struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+ ktime_t time_rem;
+
+ time_rem = hrtimer_get_remaining(cpu_histtimer);
+ if (ktime_to_us(time_rem) <= 0)
+ return;
hrtimer_try_to_cancel(cpu_histtimer);
}
@@ -386,11 +391,21 @@
{
int cpu = raw_smp_processor_id();
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+ ktime_t time_rem;
- hrtimer_try_to_cancel(&cluster->histtimer);
+ time_rem = hrtimer_get_remaining(&cluster->histtimer);
+ if (ktime_to_us(time_rem) > 0)
+ hrtimer_try_to_cancel(&cluster->histtimer);
- if (cluster->parent)
+ if (cluster->parent) {
+ time_rem = hrtimer_get_remaining(
+ &cluster->parent->histtimer);
+
+ if (ktime_to_us(time_rem) <= 0)
+ return;
+
hrtimer_try_to_cancel(&cluster->parent->histtimer);
+ }
}
static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
@@ -1394,11 +1409,11 @@
dev->last_residency = ktime_us_delta(ktime_get(), start);
update_history(dev, idx);
trace_cpu_idle_exit(idx, success);
- local_irq_enable();
if (lpm_prediction && cpu->lpm_prediction) {
histtimer_cancel();
clusttimer_cancel();
}
+ local_irq_enable();
return idx;
}
diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c
new file mode 100644
index 0000000..657e2b9
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <lpm-workarounds.h>
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+static bool skip_l2_spm;
+
+/* While exiting from RPM assisted power collapse on some targets like MSM8939
+ * the CX is bumped to turbo mode by RPM. To reduce the power impact, APSS
+ * low power driver need to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+ if (lpm_cx_reg) {
+ regulator_set_voltage(lpm_cx_reg,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+ regulator_set_voltage(lpm_cx_reg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+ if (lpm_wa_cx_turbo_unvote)
+ queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+ if (IS_ERR(lpm_cx_reg)) {
+ ret = PTR_ERR(lpm_cx_reg);
+ if (ret != -EPROBE_DEFER)
+ pr_err("Unable to get the CX regulator\n");
+ return ret;
+ }
+
+ INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+ lpm_wa_wq = alloc_workqueue("lpm-wa",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+ return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+ if (lpm_wa_wq)
+ destroy_workqueue(lpm_wa_wq);
+
+ return 0;
+}
+
+bool lpm_wa_get_skip_l2_spm(void)
+{
+ return skip_l2_spm;
+}
+EXPORT_SYMBOL(lpm_wa_get_skip_l2_spm);
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+ "qcom,lpm-wa-cx-turbo-unvote");
+ if (lpm_wa_cx_turbo_unvote) {
+ ret = lpm_wa_cx_unvote_init(pdev);
+ if (ret) {
+ pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ skip_l2_spm = of_property_read_bool(pdev->dev.of_node,
+ "qcom,lpm-wa-skip-l2-spm");
+
+ return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (lpm_wa_cx_turbo_unvote)
+ ret = lpm_wa_cx_unvote_exit();
+
+ return ret;
+}
+
+static const struct of_device_id lpm_wa_mtch_tbl[] = {
+ {.compatible = "qcom,lpm-workarounds"},
+ {},
+};
+
+static struct platform_driver lpm_wa_driver = {
+ .probe = lpm_wa_probe,
+ .remove = lpm_wa_remove,
+ .driver = {
+ .name = "lpm-workarounds",
+ .owner = THIS_MODULE,
+ .of_match_table = lpm_wa_mtch_tbl,
+ },
+};
+
+static int __init lpm_wa_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lpm_wa_driver);
+ if (ret)
+ pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+ return ret;
+}
+late_initcall(lpm_wa_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h
new file mode 100644
index 0000000..a290dcb
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+bool lpm_wa_get_skip_l2_spm(void);
+
+#endif /* __LPM_WA_H */
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 3aa75aa..f15267e 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -152,6 +152,9 @@
return -EPERM;
}
+ if (!setting)
+ return -EINVAL;
+
if ((short)(crypto_data->key_index) >= 0) {
memcpy(&setting->crypto_data, crypto_data,
@@ -1488,7 +1491,7 @@
bool is_pfe = false;
sector_t data_size;
- if (!pdev || !req || !setting) {
+ if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
@@ -1507,6 +1510,7 @@
/* It is not an error to have a request with no bio */
return 0;
}
+ //pr_err("%s bio is %pK\n", __func__, req->bio);
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
if (is_pfe) {
@@ -1664,7 +1668,7 @@
list_for_each_entry(ice_dev, &ice_devices, list) {
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
- pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ pr_debug("%s: ice device %pK\n", __func__, ice_dev);
return ice_dev;
}
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 32bc3eb..83ef9ae 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -461,7 +461,7 @@
}
ctrl->hw.base = ptr;
- pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
+ pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
ctrl->hw.base);
switch (ctrl->version) {
@@ -1338,10 +1338,20 @@
u32 current_read_len = 0, total_bytes_read = 0;
bool short_resp = false;
bool read_done = false;
- u32 dlen, diff, rlen = msg->rx_len;
+ u32 dlen, diff, rlen;
unsigned char *buff;
char cmd;
+ struct dsi_cmd_desc *of_cmd;
+ if (!msg) {
+ pr_err("Invalid msg\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ of_cmd = container_of(msg, struct dsi_cmd_desc, msg);
+
+ rlen = msg->rx_len;
if (msg->rx_len <= 2) {
short_resp = true;
rd_pkt_size = msg->rx_len;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index b059fc5..6ac7dd7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -60,6 +60,18 @@
#define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
/**
+ * enum dsi_channel_id - defines dsi channel id.
+ * @DSI_CTRL_LEFT: DSI 0 channel
+ * @DSI_CTRL_RIGHT: DSI 1 channel
+ * @DSI_CTRL_MAX: Maximum value.
+ */
+enum dsi_channel_id {
+ DSI_CTRL_LEFT = 0,
+ DSI_CTRL_RIGHT,
+ DSI_CTRL_MAX,
+};
+
+/**
* enum dsi_power_state - defines power states for dsi controller.
* @DSI_CTRL_POWER_VREG_OFF: Digital and analog supplies for DSI controller
turned off
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 7194f1a..77e0bb3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -614,12 +614,20 @@
static int dsi_display_status_reg_read(struct dsi_display *display)
{
- int rc = 0, i;
+ int rc = 0, i, cmd_channel_idx = DSI_CTRL_LEFT;
struct dsi_display_ctrl *m_ctrl, *ctrl;
pr_debug(" ++\n");
- m_ctrl = &display->ctrl[display->cmd_master_idx];
+ /*
+ * Check the panel's DSI command channel.
+ * If cmd_channel is set, we should choose the
+ * right (DSI1) controller to send the command;
+ * otherwise we choose the left (DSI0) controller.
+ */
+ if (display->panel->esd_config.cmd_channel)
+ cmd_channel_idx = DSI_CTRL_RIGHT;
+ m_ctrl = &display->ctrl[cmd_channel_idx];
if (display->tx_cmd_buf == NULL) {
rc = dsi_host_alloc_cmd_tx_buffer(display);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index cb9c1fa..dab85f4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2776,6 +2776,9 @@
esd_config->groups * status_len);
}
+ esd_config->cmd_channel = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-panel-cmds-only-by-right");
+
return 0;
error4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f8b65ab..c0ecb7f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -129,6 +129,7 @@
struct drm_panel_esd_config {
bool esd_enabled;
+ bool cmd_channel;
enum esd_check_status_mode status_mode;
struct dsi_panel_cmd_set status_cmd;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 2e2d0d8..989dc3d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,7 +107,8 @@
phy->hw.base = ptr;
- pr_debug("[%s] map dsi_phy registers to %p\n", phy->name, phy->hw.base);
+ pr_debug("[%s] map dsi_phy registers to %pK\n",
+ phy->name, phy->hw.base);
return rc;
}
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..2c9d116 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@
ret = -ENOMEM;
goto fail;
}
- DBG("eDP probed=%p", edp);
+ DBG("eDP probed=%pK", edp);
edp->pdev = pdev;
platform_set_drvdata(pdev, edp);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 95bdc36..8ffe044 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -25,6 +25,8 @@
#include "msm_fence.h"
#include "sde_trace.h"
+#define MULTIPLE_CONN_DETECTED(x) (x > 1)
+
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
@@ -111,6 +113,66 @@
kfree(c);
}
+static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state, bool enable)
+{
+ struct drm_connector *connector = NULL;
+ struct drm_connector_state *conn_state = NULL;
+ int i = 0;
+ int conn_cnt = 0;
+
+ if (msm_is_mode_seamless(&crtc_state->mode) ||
+ msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+ return true;
+
+ if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
+ return true;
+
+ if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ if ((conn_state->crtc == crtc_state->crtc) ||
+ (connector->state->crtc ==
+ crtc_state->crtc))
+ conn_cnt++;
+
+ if (MULTIPLE_CONN_DETECTED(conn_cnt))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
+ struct drm_connector_state *old_conn_state, bool enable)
+{
+ if (!old_conn_state || !old_conn_state->crtc)
+ return false;
+
+ if (!old_conn_state->crtc->state->mode_changed &&
+ !old_conn_state->crtc->state->active_changed &&
+ old_conn_state->crtc->state->connectors_changed) {
+ if (old_conn_state->crtc == connector->state->crtc)
+ return true;
+ }
+
+ if (enable)
+ return false;
+
+ if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
+ return true;
+
+ if (msm_is_mode_seamless_vrr(
+ &connector->encoder->crtc->state->adjusted_mode))
+ return true;
+
+ if (msm_is_mode_seamless_dms(
+ &connector->encoder->crtc->state->adjusted_mode))
+ return true;
+
+ return false;
+}
+
static void msm_atomic_wait_for_commit_done(
struct drm_device *dev,
struct drm_atomic_state *old_state)
@@ -174,14 +236,7 @@
if (WARN_ON(!encoder))
continue;
- if (msm_is_mode_seamless(
- &connector->encoder->crtc->state->mode) ||
- msm_is_mode_seamless_vrr(
- &connector->encoder->crtc->state->adjusted_mode))
- continue;
-
- if (msm_is_mode_seamless_dms(
- &connector->encoder->crtc->state->adjusted_mode))
+ if (_msm_seamless_for_conn(connector, old_conn_state, false))
continue;
funcs = encoder->helper_private;
@@ -223,11 +278,7 @@
if (!old_crtc_state->active)
continue;
- if (msm_is_mode_seamless(&crtc->state->mode) ||
- msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
- continue;
-
- if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+ if (_msm_seamless_for_crtc(old_state, crtc->state, false))
continue;
funcs = crtc->helper_private;
@@ -286,8 +337,14 @@
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
- if (!new_crtc_state->mode_changed)
+ if (!new_crtc_state->mode_changed &&
+ new_crtc_state->connectors_changed) {
+ if (_msm_seamless_for_conn(connector,
+ old_conn_state, false))
+ continue;
+ } else if (!new_crtc_state->mode_changed) {
continue;
+ }
DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
@@ -365,8 +422,7 @@
if (!crtc->state->active)
continue;
- if (msm_is_mode_seamless(&crtc->state->mode) ||
- msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
+ if (_msm_seamless_for_crtc(old_state, crtc->state, true))
continue;
funcs = crtc->helper_private;
@@ -397,6 +453,9 @@
connector->state->crtc->state))
continue;
+ if (_msm_seamless_for_conn(connector, old_conn_state, true))
+ continue;
+
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
@@ -444,6 +503,9 @@
connector->state->crtc->state))
continue;
+ if (_msm_seamless_for_conn(connector, old_conn_state, true))
+ continue;
+
encoder = connector->state->best_encoder;
DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0697db8..9eb62fe 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -156,7 +156,8 @@
}
if (reglog)
- printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+ dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+ dbgname, ptr, size);
return ptr;
}
@@ -187,7 +188,7 @@
void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
- printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+ pr_debug("IO:W %pK %08x\n", addr, data);
writel(data, addr);
}
@@ -196,7 +197,7 @@
u32 val = readl(addr);
if (reglog)
- printk(KERN_ERR "IO:R %p %08x\n", addr, val);
+ pr_err("IO:R %pK %08x\n", addr, val);
return val;
}
@@ -1024,7 +1025,7 @@
if (!kms)
return -ENXIO;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
return vblank_ctrl_queue_work(priv, pipe, true);
}
@@ -1035,7 +1036,7 @@
if (!kms)
return;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
vblank_ctrl_queue_work(priv, pipe, false);
}
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e8bf244..a1c9d82 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -68,7 +68,7 @@
msm_fb = to_msm_framebuffer(fb);
n = drm_format_num_planes(fb->pixel_format);
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+ DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb);
drm_framebuffer_cleanup(fb);
@@ -336,7 +336,7 @@
unsigned int hsub, vsub;
bool is_modified = false;
- DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@@ -420,7 +420,7 @@
goto fail;
}
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+ DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a33..5b886d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -142,7 +142,7 @@
goto fail_unlock;
}
- DBG("fbi=%p, dev=%p", fbi, dev);
+ DBG("fbi=%pK, dev=%pK", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
@@ -167,7 +167,7 @@
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
- DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 277b421..ddd4607 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -253,7 +253,7 @@
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %pK pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
@@ -428,7 +428,7 @@
if (!ret && domain) {
*iova = domain->iova;
- if (aspace && aspace->domain_attached)
+ if (aspace && !msm_obj->in_active_list)
msm_gem_add_obj_to_aspace_active_list(aspace, obj);
} else {
obj_remove_domain(domain);
@@ -799,7 +799,7 @@
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK %zu%s\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
@@ -968,6 +968,7 @@
INIT_LIST_HEAD(&msm_obj->domains);
INIT_LIST_HEAD(&msm_obj->iova_list);
msm_obj->aspace = NULL;
+ msm_obj->in_active_list = false;
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8521bea..ba01ffb 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -124,6 +124,7 @@
struct list_head iova_list;
struct msm_gem_address_space *aspace;
+ bool in_active_list;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d02228a..e5b1cc37 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -71,6 +71,7 @@
{
WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+ msm_obj->in_active_list = true;
}
static void smmu_aspace_remove_from_active(
@@ -84,6 +85,7 @@
list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
iova_list) {
if (msm_obj == obj) {
+ msm_obj->in_active_list = false;
list_del(&msm_obj->iova_list);
break;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cefa513..75bd40c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -4848,7 +4848,7 @@
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
- SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
+ SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file);
_sde_crtc_complete_flip(crtc, file);
}
@@ -5692,6 +5692,73 @@
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
+static int _sde_debugfs_fence_status_show(struct seq_file *s, void *data)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_connector *conn;
+ struct drm_mode_object *drm_obj;
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
+ struct sde_fence_context *ctx;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ sde_crtc = s->private;
+ crtc = &sde_crtc->base;
+ cstate = to_sde_crtc_state(crtc->state);
+
+ /* Dump input fence info */
+ seq_puts(s, "===Input fence===\n");
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct sde_plane_state *pstate;
+ struct fence *fence;
+
+ pstate = to_sde_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ seq_printf(s, "plane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ fence = pstate->input_fence;
+ if (fence)
+ sde_fence_list_dump(fence, &s);
+ }
+
+ /* Dump release fence info */
+ seq_puts(s, "\n");
+ seq_puts(s, "===Release fence===\n");
+ ctx = &sde_crtc->output_fence;
+ drm_obj = &crtc->base;
+ sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+ seq_puts(s, "\n");
+
+ /* Dump retire fence info */
+ seq_puts(s, "===Retire fence===\n");
+ drm_for_each_connector(conn, crtc->dev)
+ if (conn->state && conn->state->crtc == crtc &&
+ cstate->num_connectors < MAX_CONNECTORS) {
+ struct sde_connector *c_conn;
+
+ c_conn = to_sde_connector(conn);
+ ctx = &c_conn->retire_fence;
+ drm_obj = &conn->base;
+ sde_debugfs_timeline_dump(ctx, drm_obj, &s);
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int _sde_debugfs_fence_status(struct inode *inode, struct file *file)
+{
+ return single_open(file, _sde_debugfs_fence_status_show,
+ inode->i_private);
+}
+
static int _sde_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc;
@@ -5708,6 +5775,10 @@
.read = _sde_crtc_misr_read,
.write = _sde_crtc_misr_setup,
};
+ static const struct file_operations debugfs_fence_fops = {
+ .open = _sde_debugfs_fence_status,
+ .read = seq_read,
+ };
if (!crtc)
return -EINVAL;
@@ -5732,6 +5803,8 @@
&sde_crtc_debugfs_state_fops);
debugfs_create_file("misr_data", 0600, sde_crtc->debugfs_root,
sde_crtc, &debugfs_misr_fops);
+ debugfs_create_file("fence_status", 0400, sde_crtc->debugfs_root,
+ sde_crtc, &debugfs_fence_fops);
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 1f30a5c..8ffbb98 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -431,3 +431,55 @@
obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
ctx->commit_count);
}
+
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+ char timeline_str[TIMELINE_VAL_LENGTH];
+
+ if (fence->ops->timeline_value_str)
+ fence->ops->timeline_value_str(fence,
+ timeline_str, TIMELINE_VAL_LENGTH);
+
+ seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->seqno, timeline_str,
+ fence->ops->signaled ?
+ fence->ops->signaled(fence) : 0xffffffff);
+}
+
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+ struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+ char *obj_name;
+ struct sde_fence *fc, *next;
+ struct fence *fence;
+
+ if (!ctx || !drm_obj) {
+ SDE_ERROR("invalid input params\n");
+ return;
+ }
+
+ switch (drm_obj->type) {
+ case DRM_MODE_OBJECT_CRTC:
+ obj_name = "crtc";
+ break;
+ case DRM_MODE_OBJECT_CONNECTOR:
+ obj_name = "connector";
+ break;
+ default:
+ obj_name = "unknown";
+ break;
+ }
+
+ seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
+ obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
+ ctx->commit_count);
+
+ spin_lock(&ctx->list_lock);
+ list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
+ fence = &fc->base;
+ sde_fence_list_dump(fence, s);
+ }
+ spin_unlock(&ctx->list_lock);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 7891be4..7d7fd02 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -153,6 +153,22 @@
void sde_fence_timeline_status(struct sde_fence_context *ctx,
struct drm_mode_object *drm_obj);
+/**
+ * sde_debugfs_timeline_dump - utility to dump fence timeline info to a debugfs node
+ * @ctx: Pointer to the fence context to dump
+ * @drm_obj: Pointer to drm object associated with the fence timeline
+ * @s: Pointer to the seq_file used for writing to the debugfs node
+ */
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+ struct drm_mode_object *drm_obj, struct seq_file **s);
+
+/**
+ * sde_fence_list_dump - dumps info for a single fence to a debugfs node
+ * @fence: Pointer to the fence to dump
+ * @s: Pointer to the seq_file used for writing to the debugfs node
+ */
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s);
+
#else
static inline void *sde_sync_get(uint64_t fd)
{
@@ -212,6 +228,18 @@
{
/* do nothing */
}
+
+void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
+ struct drm_mode_object *drm_obj, struct seq_file **s)
+{
+ /* do nothing */
+}
+
+void sde_fence_list_dump(struct fence *fence, struct seq_file **s)
+{
+ /* do nothing */
+}
+
#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index 9e64d78..c7989cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -680,7 +680,7 @@
void __iomem *base;
if (!hw_cfg || (hw_cfg->len != sizeof(*pcc) && hw_cfg->payload)) {
- DRM_ERROR("invalid params hw %p payload %p payloadsize %d \"\
+ DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d \"\
exp size %zd\n",
hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index b0a52a7..c2fffef 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2151,8 +2151,8 @@
aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
- msm_gem_aspace_domain_attach_detach_update(aspace, false);
aspace->domain_attached = true;
+ msm_gem_aspace_domain_attach_detach_update(aspace, false);
}
return 0;
@@ -2664,21 +2664,22 @@
mutex_lock(&dev->mode_config.mutex);
connector_list = &dev->mode_config.connector_list;
- list_for_each_entry(conn_iter, connector_list, head) {
- /**
- * SDE_KMS doesn't attach more than one encoder to
- * a DSI connector. So it is safe to check only with the
- * first encoder entry. Revisit this logic if we ever have
- * to support continuous splash for external displays in MST
- * configuration.
- */
- if (conn_iter &&
- (conn_iter->encoder_ids[0] == encoder->base.id)) {
- connector = conn_iter;
- break;
+ if (connector_list) {
+ list_for_each_entry(conn_iter, connector_list, head) {
+ /**
+ * SDE_KMS doesn't attach more than one encoder to
+ * a DSI connector. So it is safe to check only with
+ * the first encoder entry. Revisit this logic if we
+ * ever have to support continuous splash for
+ * external displays in MST configuration.
+ */
+ if (conn_iter &&
+ (conn_iter->encoder_ids[0] == encoder->base.id)) {
+ connector = conn_iter;
+ break;
+ }
}
}
-
if (!connector) {
SDE_ERROR("connector not initialized\n");
mutex_unlock(&dev->mode_config.mutex);
@@ -3195,7 +3196,7 @@
sde_kms->mmio = NULL;
goto error;
}
- DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
+ DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys");
rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index d1a6005..4bde8c6 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -412,6 +412,15 @@
0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
/* VPC CTX 1 */
0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2,
+};
+
+/*
+ * GPMU registers to dump for A5XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a5xx_gpmu_registers[] = {
/* GPMU */
0xA800, 0xA8FF, 0xAC60, 0xAC60,
};
@@ -664,24 +673,23 @@
return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
}
+struct registers {
+ const unsigned int *regs;
+ size_t size;
+};
+
static size_t a5xx_legacy_snapshot_registers(struct kgsl_device *device,
- u8 *buf, size_t remain)
+ u8 *buf, size_t remain, const unsigned int *regs, size_t size)
{
- struct kgsl_snapshot_registers regs = {
- .regs = a5xx_registers,
- .count = ARRAY_SIZE(a5xx_registers) / 2,
+ struct kgsl_snapshot_registers snapshot_regs = {
+ .regs = regs,
+ .count = size / 2,
};
- return kgsl_snapshot_dump_registers(device, buf, remain, ®s);
+ return kgsl_snapshot_dump_registers(device, buf, remain,
+ &snapshot_regs);
}
-static struct cdregs {
- const unsigned int *regs;
- unsigned int size;
-} _a5xx_cd_registers[] = {
- { a5xx_registers, ARRAY_SIZE(a5xx_registers) },
-};
-
#define REG_PAIR_COUNT(_a, _i) \
(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
@@ -691,11 +699,13 @@
struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
unsigned int *data = (unsigned int *)(buf + sizeof(*header));
unsigned int *src = (unsigned int *) registers.hostptr;
- unsigned int i, j, k;
+ struct registers *regs = (struct registers *)priv;
+ unsigned int j, k;
unsigned int count = 0;
if (crash_dump_valid == false)
- return a5xx_legacy_snapshot_registers(device, buf, remain);
+ return a5xx_legacy_snapshot_registers(device, buf, remain,
+ regs->regs, regs->size);
if (remain < sizeof(*header)) {
SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
@@ -704,24 +714,20 @@
remain -= sizeof(*header);
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
+ for (j = 0; j < regs->size / 2; j++) {
+ unsigned int start = regs->regs[2 * j];
+ unsigned int end = regs->regs[(2 * j) + 1];
- for (j = 0; j < regs->size / 2; j++) {
- unsigned int start = regs->regs[2 * j];
- unsigned int end = regs->regs[(2 * j) + 1];
+ if (remain < ((end - start) + 1) * 8) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ goto out;
+ }
- if (remain < ((end - start) + 1) * 8) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- goto out;
- }
+ remain -= ((end - start) + 1) * 8;
- remain -= ((end - start) + 1) * 8;
-
- for (k = start; k <= end; k++, count++) {
- *data++ = k;
- *data++ = *src++;
- }
+ for (k = start; k <= end; k++, count++) {
+ *data++ = k;
+ *data++ = *src++;
}
}
@@ -861,6 +867,7 @@
struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
unsigned int reg, i;
struct adreno_ringbuffer *rb;
+ struct registers regs;
/* Disable Clock gating temporarily for the debug bus to work */
a5xx_hwcg_set(adreno_dev, false);
@@ -877,8 +884,20 @@
/* Try to run the crash dumper */
_a5xx_do_crashdump(device);
- kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
- snapshot, a5xx_snapshot_registers, NULL);
+ regs.regs = a5xx_registers;
+ regs.size = ARRAY_SIZE(a5xx_registers);
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+ a5xx_snapshot_registers, ®s);
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+ regs.regs = a5xx_gpmu_registers;
+ regs.size = ARRAY_SIZE(a5xx_gpmu_registers);
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+ snapshot, a5xx_snapshot_registers, ®s);
+ }
+
/* Dump SP TP HLSQ registers */
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
@@ -1035,17 +1054,23 @@
* To save the registers, we need 16 bytes per register pair for the
* script and a dword for each register int the data
*/
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
+ /* Each pair needs 16 bytes (2 qwords) */
+ script_size += (ARRAY_SIZE(a5xx_registers) / 2) * 16;
+
+ /* Each register needs a dword in the data */
+ for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++)
+ data_size += REG_PAIR_COUNT(a5xx_registers, j) *
+ sizeof(unsigned int);
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
/* Each pair needs 16 bytes (2 qwords) */
- script_size += (regs->size / 2) * 16;
+ script_size += (ARRAY_SIZE(a5xx_gpmu_registers) / 2) * 16;
/* Each register needs a dword in the data */
- for (j = 0; j < regs->size / 2; j++)
- data_size += REG_PAIR_COUNT(regs->regs, j) *
+ for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++)
+ data_size += REG_PAIR_COUNT(a5xx_gpmu_registers, j) *
sizeof(unsigned int);
-
}
/*
@@ -1083,13 +1108,21 @@
ptr = (uint64_t *) capturescript.hostptr;
/* For the registers, program a read command for each pair */
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
- for (j = 0; j < regs->size / 2; j++) {
- unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+ for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++) {
+ unsigned int r = REG_PAIR_COUNT(a5xx_registers, j);
+ *ptr++ = registers.gpuaddr + offset;
+ *ptr++ = (((uint64_t) a5xx_registers[2 * j]) << 44)
+ | r;
+ offset += r * sizeof(unsigned int);
+ }
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+ for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++) {
+ unsigned int r = REG_PAIR_COUNT(a5xx_gpmu_registers, j);
*ptr++ = registers.gpuaddr + offset;
- *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+ *ptr++ = (((uint64_t) a5xx_gpmu_registers[2 * j]) << 44)
+ | r;
offset += r * sizeof(unsigned int);
}
}
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index a5075ba..340c589 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -280,14 +280,13 @@
int stm_set_ost_params(struct stm_drvdata *drvdata, size_t bitmap_size)
{
- stmdrvdata = drvdata;
-
drvdata->chs.bitmap = devm_kzalloc(drvdata->dev, bitmap_size,
GFP_KERNEL);
if (!drvdata->chs.bitmap)
return -ENOMEM;
bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
+ stmdrvdata = drvdata;
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 159512c..caeda7b 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* Description: CoreSight System Trace Macrocell driver
*
@@ -839,11 +839,6 @@
}
bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
- /* Store the driver data pointer for use in exported functions */
- ret = stm_set_ost_params(drvdata, bitmap_size);
- if (ret)
- return ret;
-
guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!guaranteed)
return -ENOMEM;
@@ -872,6 +867,11 @@
goto stm_unregister;
}
+ /* Store the driver data pointer for use in exported functions */
+ ret = stm_set_ost_params(drvdata, bitmap_size);
+ if (ret)
+ goto stm_unregister;
+
pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
index bfff285..9968e44 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
@@ -95,12 +95,12 @@
#define W_FLG 0
#define R_FLG 1
int icm20602_bulk_read(struct inv_icm20602_state *st,
- int reg, char *buf, int size)
+ int reg, u8 *buf, int size)
{
int result = MPU_SUCCESS;
char tx_buf[2] = {0x0, 0x0};
int tmp_size = size;
- int tmp_buf = buf;
+ u8 *tmp_buf = buf;
struct i2c_msg msg[2];
if (!st || !buf)
@@ -109,38 +109,31 @@
if (st->interface == ICM20602_SPI) {
tx_buf[0] = ICM20602_READ_REG(reg);
result = spi_write_then_read(st->spi, &tx_buf[0],
- 1, tmp_buf, size);
+ 1, tmp_buf, size);
if (result) {
pr_err("mpu read reg %u failed, rc %d\n",
- reg, result);
+ reg, result);
result = -MPU_READ_FAIL;
}
} else {
result = size;
- while (tmp_size > 0) {
#ifdef ICM20602_I2C_SMBUS
- result += i2c_smbus_read_i2c_block_data(st->client,
- reg, (tmp_size < 32)?tmp_size:32, tmp_buf);
- tmp_size -= 32;
- tmp_buf += tmp_size;
+ result += i2c_smbus_read_i2c_block_data(st->client,
+ reg, size, tmp_buf);
#else
- tx_buf[0] = reg;
- msg[0].addr = st->client->addr;
- msg[0].flags = W_FLG;
- msg[0].len = 1;
- msg[0].buf = tx_buf;
+ tx_buf[0] = reg;
+ msg[0].addr = st->client->addr;
+ msg[0].flags = W_FLG;
+ msg[0].len = 1;
+ msg[0].buf = tx_buf;
- msg[1].addr = st->client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = (tmp_size < 32)?tmp_size:32;
- msg[1].buf = tmp_buf;
- i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
- tmp_size -= 32;
- tmp_buf += tmp_size;
+ msg[1].addr = st->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = size;
+ msg[1].buf = tmp_buf;
+ i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
#endif
- }
}
-
return result;
}
@@ -355,6 +348,33 @@
return MPU_SUCCESS;
}
+int icm20602_int_status(struct inv_icm20602_state *st,
+ u8 *int_status)
+{
+ return icm20602_read_reg(st,
+ reg_set_20602.INT_STATUS.address, int_status);
+}
+
+int icm20602_int_wm_status(struct inv_icm20602_state *st,
+ u8 *int_status)
+{
+ return icm20602_read_reg(st,
+ reg_set_20602.FIFO_WM_INT_STATUS.address, int_status);
+}
+
+int icm20602_fifo_count(struct inv_icm20602_state *st,
+ u16 *fifo_count)
+{
+ u8 count_h, count_l;
+
+ *fifo_count = 0;
+ icm20602_read_reg(st, reg_set_20602.FIFO_COUNTH.address, &count_h);
+ icm20602_read_reg(st, reg_set_20602.FIFO_COUNTL.address, &count_l);
+ *fifo_count |= (count_h << 8);
+ *fifo_count |= count_l;
+ return MPU_SUCCESS;
+}
+
static int icm20602_config_waterlevel(struct inv_icm20602_state *st)
{
struct icm20602_user_config *config = NULL;
@@ -809,6 +829,7 @@
int result = MPU_SUCCESS;
struct icm20602_user_config *config = NULL;
int package_count;
+ int i;
config = st->config;
if (st == NULL || st->config == NULL) {
@@ -860,7 +881,7 @@
/* buffer malloc */
package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
- st->buf = kzalloc(sizeof(config->fifo_waterlevel * 2), GFP_ATOMIC);
+ st->buf = kzalloc(config->fifo_waterlevel * 2, GFP_ATOMIC);
if (!st->buf)
return -ENOMEM;
@@ -869,9 +890,26 @@
if (!st->data_push)
return -ENOMEM;
+ for (i = 0; i < package_count; i++) {
+ st->data_push[i].raw_data =
+ kzalloc(ICM20602_PACKAGE_SIZE, GFP_ATOMIC);
+ }
+
return result;
}
+int icm20602_reset_fifo(struct inv_icm20602_state *st)
+{
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+ if (icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL)) {
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+ return -MPU_FAIL;
+ }
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+ return MPU_SUCCESS;
+}
+
+
void icm20602_rw_test(struct inv_icm20602_state *st)
{
uint8_t val = 0;
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
index 0f7fc92..15df447 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
@@ -41,12 +41,12 @@
{
struct icm20602_user_config *config = st->config;
- config->user_fps_in_ms = 10;
+ config->user_fps_in_ms = 20;
config->gyro_lpf = INV_ICM20602_GYRO_LFP_92HZ;
config->gyro_fsr = ICM20602_GYRO_FSR_1000DPS;
config->acc_lpf = ICM20602_ACCLFP_99;
config->acc_fsr = ICM20602_ACC_FSR_4G;
- config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_100HZ;
+ config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_200HZ;
config->fifo_enabled = true;
}
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
index 943fc1e..9ea5ae5 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
@@ -180,7 +180,7 @@
uint32_t user_fps_in_ms;
bool fifo_enabled;
- uint32_t fifo_waterlevel;
+ uint16_t fifo_waterlevel;
struct X_Y_Z wake_on_motion;
};
@@ -257,19 +257,21 @@
struct struct_icm20602_data {
s64 timestamps;
- struct struct_icm20602_raw_data raw_data;
- struct struct_icm20602_real_data real_data;
+ u8 *raw_data;
};
extern struct iio_trigger *inv_trig;
irqreturn_t inv_icm20602_irq_handler(int irq, void *p);
irqreturn_t inv_icm20602_read_fifo_fn(int irq, void *p);
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev);
int inv_icm20602_probe_trigger(struct iio_dev *indio_dev);
void inv_icm20602_remove_trigger(struct inv_icm20602_state *st);
int inv_icm20602_validate_trigger(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+int icm20602_int_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_int_wm_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_reset_fifo(struct inv_icm20602_state *st);
+int icm20602_fifo_count(struct inv_icm20602_state *st, u16 *fifo_count);
int icm20602_read_raw(struct inv_icm20602_state *st,
struct struct_icm20602_real_data *real_data, uint32_t type);
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
index b0f93be..de02656 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
@@ -40,15 +40,6 @@
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev)
-{
- int result;
- //u8 d;
- //struct inv_icm20602_state *st = iio_priv(indio_dev);
-
- return result;
-}
-
/*
* inv_icm20602_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@@ -67,44 +58,50 @@
return IRQ_WAKE_THREAD;
}
+#define BIT_FIFO_OFLOW_INT 0x10
+#define BIT_FIFO_WM_INT 0x40
static int inv_icm20602_read_data(struct iio_dev *indio_dev)
{
int result = MPU_SUCCESS;
struct inv_icm20602_state *st = iio_priv(indio_dev);
struct icm20602_user_config *config = st->config;
int package_count;
- //char *buf = st->buf;
- //struct struct_icm20602_data *data_push = st->data_push;
+ char *buf = st->buf;
+ struct struct_icm20602_data *data_push = st->data_push;
s64 timestamp;
+ u8 int_status, int_wm_status;
+ u16 fifo_count;
int i;
if (!st)
return -MPU_FAIL;
package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
mutex_lock(&indio_dev->mlock);
- if (config->fifo_enabled) {
- result = icm20602_read_fifo(st,
- st->buf, config->fifo_waterlevel);
- if (result != config->fifo_waterlevel) {
- pr_err("icm20602 read fifo failed, result = %d\n",
- result);
- goto flush_fifo;
- }
-
- for (i = 0; i < package_count; i++) {
- memcpy((char *)(&st->data_push[i].raw_data),
- st->buf, ICM20602_PACKAGE_SIZE);
- result = kfifo_out(&st->timestamps,
- ×tamp, 1);
- /* when there is no timestamp, put it as 0 */
- if (result == 0)
- timestamp = 0;
- st->data_push[i].timestamps = timestamp;
- iio_push_to_buffers(indio_dev, st->data_push+i);
- st->buf += ICM20602_PACKAGE_SIZE;
- }
+ icm20602_int_status(st, &int_status);
+ if (int_status & BIT_FIFO_OFLOW_INT) {
+ icm20602_fifo_count(st, &fifo_count);
+ pr_debug("fifo_count = %d\n", fifo_count);
+ icm20602_reset_fifo(st);
+ goto end_session;
}
-//end_session:
+ if (config->fifo_enabled) {
+ result = kfifo_out(&st->timestamps,
+ ×tamp, 1);
+ /* when there is no timestamp, put it as 0 */
+ if (result == 0)
+ timestamp = 0;
+ for (i = 0; i < package_count; i++) {
+ result = icm20602_read_fifo(st,
+ buf, ICM20602_PACKAGE_SIZE);
+ memcpy(st->data_push[i].raw_data,
+ buf, ICM20602_PACKAGE_SIZE);
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ st->data_push[i].raw_data, timestamp);
+ buf += ICM20602_PACKAGE_SIZE;
+ }
+ memset(st->buf, 0, config->fifo_waterlevel);
+ }
+end_session:
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
return MPU_SUCCESS;
@@ -112,7 +109,7 @@
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_clear_kfifo(st);
- inv_icm20602_reset_fifo(indio_dev);
+ icm20602_reset_fifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
return MPU_SUCCESS;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1719336..c3376df 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -579,7 +579,6 @@
{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
{ ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
- { ARM_SMMU_OPT_HIBERNATION, "qcom,hibernation-support"},
{ 0, NULL},
};
@@ -607,6 +606,7 @@
static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu);
static int msm_secure_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
@@ -635,6 +635,13 @@
arm_smmu_options[i].prop);
}
} while (arm_smmu_options[++i].opt);
+
+ if (arm_smmu_opt_hibernation(smmu) &&
+ smmu->options && ARM_SMMU_OPT_SKIP_INIT) {
+ dev_info(smmu->dev,
+ "Disabling incompatible option: skip-init\n");
+ smmu->options &= ~ARM_SMMU_OPT_SKIP_INIT;
+ }
}
static bool is_dynamic_domain(struct iommu_domain *domain)
@@ -707,7 +714,7 @@
static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu)
{
- return smmu->options & ARM_SMMU_OPT_HIBERNATION;
+ return IS_ENABLED(CONFIG_HIBERNATION);
}
/*
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2f0f448..b0e9fe6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,34 @@
#include "irq-gic-common.h"
+#define MAX_IRQ 1020U /* Max number of SGI+PPI+SPI */
+#define SPI_START_IRQ 32 /* SPI start irq number */
+#define GICD_ICFGR_BITS 2 /* 2 bits per irq in GICD_ICFGR */
+#define GICD_ISENABLER_BITS 1 /* 1 bit per irq in GICD_ISENABLER */
+#define GICD_IPRIORITYR_BITS 8 /* 8 bits per irq in GICD_IPRIORITYR */
+
+/* 32 bit mask with lower n bits set */
+#define UMASK_LOW(n) (~0U >> (32 - (n)))
+
+/* Number of 32-bit words required to store all irqs, for
+ * registers where each word stores configuration for each irq
+ * in bits_per_irq bits.
+ */
+#define NUM_IRQ_WORDS(bits_per_irq) (DIV_ROUND_UP(MAX_IRQ, \
+ 32 / (bits_per_irq)))
+#define MAX_IRQS_IGNORE 10
+
+#define IRQ_NR_BOUND(nr) min((nr), MAX_IRQ)
+
+/* Bitmap to irqs, which are restored */
+static DECLARE_BITMAP(irqs_restore, MAX_IRQ);
+
+/* Bitmap to irqs, for which restore is ignored.
+ * Presently, only GICD_IROUTER mismatches are
+ * ignored.
+ */
+static DECLARE_BITMAP(irqs_ignore_restore, MAX_IRQ);
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -60,6 +88,16 @@
u32 nr_redist_regions;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
+
+ u64 saved_spi_router[MAX_IRQ];
+ u32 saved_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+ u32 saved_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+ u32 saved_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
+
+ u64 changed_spi_router[MAX_IRQ];
+ u32 changed_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+ u32 changed_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+ u32 changed_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
};
static struct gic_chip_data gic_data __read_mostly;
@@ -67,6 +105,58 @@
static struct gic_kvm_info gic_v3_kvm_info;
+enum gicd_save_restore_reg {
+ SAVED_ICFGR,
+ SAVED_IS_ENABLER,
+ SAVED_IPRIORITYR,
+ NUM_SAVED_GICD_REGS,
+};
+
+/* Stores start address of spi config for saved gicd regs */
+static u32 *saved_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = gic_data.saved_spi_cfg,
+ [SAVED_IS_ENABLER] = gic_data.saved_spi_enable,
+ [SAVED_IPRIORITYR] = gic_data.saved_spi_priority,
+};
+
+/* Stores start address of spi config for changed gicd regs */
+static u32 *changed_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = gic_data.changed_spi_cfg,
+ [SAVED_IS_ENABLER] = gic_data.changed_spi_enable,
+ [SAVED_IPRIORITYR] = gic_data.changed_spi_priority,
+};
+
+/* GICD offset for saved registers */
+static u32 gicd_offset[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = GICD_ICFGR,
+ [SAVED_IS_ENABLER] = GICD_ISENABLER,
+ [SAVED_IPRIORITYR] = GICD_IPRIORITYR,
+};
+
+/* Bits per irq word, for gicd saved registers */
+static u32 gicd_reg_bits_per_irq[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = GICD_ICFGR_BITS,
+ [SAVED_IS_ENABLER] = GICD_ISENABLER_BITS,
+ [SAVED_IPRIORITYR] = GICD_IPRIORITYR_BITS,
+};
+
+#define for_each_spi_irq_word(i, reg) \
+ for (i = 0; \
+ i < DIV_ROUND_UP(IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ, \
+ 32 / gicd_reg_bits_per_irq[reg]); \
+ i++)
+
+#define read_spi_word_offset(base, reg, i) \
+ readl_relaxed_no_log( \
+ base + gicd_offset[reg] + i * 4 + \
+ SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
+#define restore_spi_word_offset(base, reg, i) \
+ writel_relaxed_no_log( \
+ saved_spi_regs_start[reg][i],\
+ base + gicd_offset[reg] + i * 4 + \
+ SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -134,6 +224,229 @@
}
#endif
+void gic_v3_dist_save(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ int reg, i;
+
+ for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
+ for_each_spi_irq_word(i, reg) {
+ saved_spi_regs_start[reg][i] =
+ read_spi_word_offset(base, reg, i);
+ }
+ }
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++)
+ gic_data.saved_spi_router[i] =
+ gic_read_irouter(base + GICD_IROUTER + i * 8);
+}
+
+static void _gicd_check_reg(enum gicd_save_restore_reg reg)
+{
+ void __iomem *base = gic_data.dist_base;
+ u32 *saved_spi_cfg = saved_spi_regs_start[reg];
+ u32 *changed_spi_cfg = changed_spi_regs_start[reg];
+ u32 bits_per_irq = gicd_reg_bits_per_irq[reg];
+ u32 current_cfg = 0;
+ int i, j = SPI_START_IRQ, l;
+ u32 k;
+
+ for_each_spi_irq_word(i, reg) {
+ current_cfg = read_spi_word_offset(base, reg, i);
+ if (current_cfg != saved_spi_cfg[i]) {
+ for (k = current_cfg ^ saved_spi_cfg[i],
+ l = 0; k ; k >>= bits_per_irq, l++) {
+ if (k & UMASK_LOW(bits_per_irq))
+ set_bit(j+l, irqs_restore);
+ }
+ changed_spi_cfg[i] = current_cfg ^ saved_spi_cfg[i];
+ }
+ j += 32 / bits_per_irq;
+ }
+}
+
+#define _gic_v3_dist_check_icfgr() \
+ _gicd_check_reg(SAVED_ICFGR)
+#define _gic_v3_dist_check_ipriorityr() \
+ _gicd_check_reg(SAVED_IPRIORITYR)
+#define _gic_v3_dist_check_isenabler() \
+ _gicd_check_reg(SAVED_IS_ENABLER)
+
+static void _gic_v3_dist_check_irouter(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ u64 current_irouter_cfg = 0;
+ int i;
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+ if (test_bit(i, irqs_ignore_restore))
+ continue;
+ current_irouter_cfg = gic_read_irouter(
+ base + GICD_IROUTER + i * 8);
+ if (current_irouter_cfg != gic_data.saved_spi_router[i]) {
+ set_bit(i, irqs_restore);
+ gic_data.changed_spi_router[i] =
+ current_irouter_cfg ^ gic_data.saved_spi_router[i];
+ }
+ }
+}
+
+static void _gic_v3_dist_restore_reg(enum gicd_save_restore_reg reg)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i;
+
+ for_each_spi_irq_word(i, reg) {
+ if (changed_spi_regs_start[reg][i])
+ restore_spi_word_offset(base, reg, i);
+ }
+
+ /* Commit all restored configurations before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_restore_icfgr() _gic_v3_dist_restore_reg(SAVED_ICFGR)
+#define _gic_v3_dist_restore_ipriorityr() \
+ _gic_v3_dist_restore_reg(SAVED_IPRIORITYR)
+
+static void _gic_v3_dist_restore_set_reg(u32 offset)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i, j = SPI_START_IRQ, l;
+ int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+ for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+ u32 reg_val = readl_relaxed_no_log(base + offset + i * 4 + 4);
+ bool irqs_restore_updated = 0;
+
+ for (l = 0; l < 32; l++) {
+ if (test_bit(j+l, irqs_restore)) {
+ reg_val |= BIT(l);
+ irqs_restore_updated = 1;
+ }
+ }
+
+ if (irqs_restore_updated) {
+ writel_relaxed_no_log(
+ reg_val, base + offset + i * 4 + 4);
+ }
+ }
+
+ /* Commit restored configuration updates before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_restore_isenabler() \
+ _gic_v3_dist_restore_set_reg(GICD_ISENABLER)
+
+#define _gic_v3_dist_restore_ispending() \
+ _gic_v3_dist_restore_set_reg(GICD_ISPENDR)
+
+static void _gic_v3_dist_restore_irouter(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i;
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+ if (test_bit(i, irqs_ignore_restore))
+ continue;
+ if (gic_data.changed_spi_router[i]) {
+ gic_write_irouter(gic_data.saved_spi_router[i],
+ base + GICD_IROUTER + i * 8);
+ }
+ }
+
+ /* Commit GICD_IROUTER writes before subsequent writes */
+ wmb();
+}
+
+static void _gic_v3_dist_clear_reg(u32 offset)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i, j = SPI_START_IRQ, l;
+ int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+ for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+ u32 clear = 0;
+ bool irqs_restore_updated = 0;
+
+ for (l = 0; l < 32; l++) {
+ if (test_bit(j+l, irqs_restore)) {
+ clear |= BIT(l);
+ irqs_restore_updated = 1;
+ }
+ }
+
+ if (irqs_restore_updated) {
+ writel_relaxed_no_log(
+ clear, base + offset + i * 4 + 4);
+ }
+ }
+
+ /* Commit clearing of irq config before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_set_icenabler() \
+ _gic_v3_dist_clear_reg(GICD_ICENABLER)
+
+#define _gic_v3_dist_set_icpending() \
+ _gic_v3_dist_clear_reg(GICD_ICPENDR)
+
+#define _gic_v3_dist_set_icactive() \
+ _gic_v3_dist_clear_reg(GICD_ICACTIVER)
+
+/* Restore GICD state for SPIs. SPI configuration is restored
+ * for GICD_ICFGR, GICD_ISENABLER, GICD_IPRIORITYR, GICD_IROUTER
+ * registers. Following is the sequence for restore:
+ *
+ * 1. For SPIs, check whether any of GICD_ICFGR, GICD_ISENABLER,
+ * GICD_IPRIORITYR, GICD_IROUTER, current configuration is
+ * different from saved configuration.
+ *
+ * For all irqs, with mismatched configurations,
+ *
+ * 2. Set GICD_ICENABLER and wait for its completion.
+ *
+ * 3. Restore any changed GICD_ICFGR, GICD_IPRIORITYR, GICD_IROUTER
+ * configurations.
+ *
+ * 4. Set GICD_ICACTIVER.
+ *
+ * 5. Set pending for the interrupt.
+ *
+ * 6. Enable interrupt and wait for its completion.
+ *
+ */
+void gic_v3_dist_restore(void)
+{
+ _gic_v3_dist_check_icfgr();
+ _gic_v3_dist_check_ipriorityr();
+ _gic_v3_dist_check_isenabler();
+ _gic_v3_dist_check_irouter();
+
+ if (bitmap_empty(irqs_restore, IRQ_NR_BOUND(gic_data.irq_nr)))
+ return;
+
+ _gic_v3_dist_set_icenabler();
+ gic_dist_wait_for_rwp();
+
+ _gic_v3_dist_restore_icfgr();
+ _gic_v3_dist_restore_ipriorityr();
+ _gic_v3_dist_restore_irouter();
+
+ _gic_v3_dist_set_icactive();
+
+ _gic_v3_dist_set_icpending();
+ _gic_v3_dist_restore_ispending();
+
+ _gic_v3_dist_restore_isenabler();
+ gic_dist_wait_for_rwp();
+
+ /* Commit all writes before proceeding */
+ wmb();
+}
+
/*
* gic_show_pending_irq - Shows the pending interrupts
* Note: Interrupts should be disabled on the cpu from which
@@ -1244,7 +1557,8 @@
struct redist_region *rdist_regs;
u64 redist_stride;
u32 nr_redist_regions;
- int err, i;
+ int err, i, ignore_irqs_len;
+ u32 ignore_restore_irqs[MAX_IRQS_IGNORE] = {0};
dist_base = of_iomap(node, 0);
if (!dist_base) {
@@ -1294,6 +1608,14 @@
gic_populate_ppi_partitions(node);
gic_of_setup_kvm_info(node);
+
+ ignore_irqs_len = of_property_read_variable_u32_array(node,
+ "ignored-save-restore-irqs",
+ ignore_restore_irqs,
+ 0, MAX_IRQS_IGNORE);
+ for (i = 0; i < ignore_irqs_len; i++)
+ set_bit(ignore_restore_irqs[i], irqs_ignore_restore);
+
return 0;
out_unmap_rdist:
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 764657a..8a850c5 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/qpnp/qpnp-misc.h>
#include <linux/qpnp/qpnp-revid.h>
@@ -321,6 +322,7 @@
int sc_irq;
struct pwm_param pwm_data;
struct hap_lra_ares_param ares_cfg;
+ struct regulator *vcc_pon;
u32 play_time_ms;
u32 max_play_time_ms;
u32 vmax_mv;
@@ -355,6 +357,7 @@
bool lra_auto_mode;
bool play_irq_en;
bool auto_res_err_recovery_hw;
+ bool vcc_pon_enabled;
};
static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip);
@@ -801,10 +804,29 @@
enable = atomic_read(&chip->state);
pr_debug("state: %d\n", enable);
+
+ if (chip->vcc_pon && enable && !chip->vcc_pon_enabled) {
+ rc = regulator_enable(chip->vcc_pon);
+ if (rc < 0)
+ pr_err("%s: could not enable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ chip->vcc_pon_enabled = true;
+ }
+
rc = qpnp_haptics_play(chip, enable);
if (rc < 0)
pr_err("Error in %sing haptics, rc=%d\n",
enable ? "play" : "stopp", rc);
+
+ if (chip->vcc_pon && !enable && chip->vcc_pon_enabled) {
+ rc = regulator_disable(chip->vcc_pon);
+ if (rc)
+ pr_err("%s: could not disable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ chip->vcc_pon_enabled = false;
+ }
}
static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer)
@@ -2054,6 +2076,7 @@
struct device_node *revid_node, *misc_node;
const char *temp_str;
int rc, temp;
+ struct regulator *vcc_pon;
rc = of_property_read_u32(node, "reg", &temp);
if (rc < 0) {
@@ -2381,6 +2404,16 @@
else if (chip->play_mode == HAP_PWM)
rc = qpnp_haptics_parse_pwm_dt(chip);
+ if (of_find_property(node, "vcc_pon-supply", NULL)) {
+ vcc_pon = regulator_get(&chip->pdev->dev, "vcc_pon");
+ if (IS_ERR(vcc_pon)) {
+ rc = PTR_ERR(vcc_pon);
+ dev_err(&chip->pdev->dev,
+ "regulator get failed vcc_pon rc=%d\n", rc);
+ }
+ chip->vcc_pon = vcc_pon;
+ }
+
return rc;
}
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index d2e576d..861d987 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2215,12 +2215,16 @@
return rc;
if (wled->en_ext_pfet_sc_pro) {
- reg = QPNP_WLED_EXT_FET_DTEST2;
- rc = qpnp_wled_sec_write_reg(wled,
+ if (!(wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE
+ && wled->pmic_rev_id->rev4 ==
+ PMI8998_V2P0_REV4)) {
+ reg = QPNP_WLED_EXT_FET_DTEST2;
+ rc = qpnp_wled_sec_write_reg(wled,
QPNP_WLED_TEST1_REG(wled->ctrl_base),
reg);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
+ }
}
} else {
rc = qpnp_wled_read_reg(wled,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 5e4ff0d..90603de 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -22,11 +22,6 @@
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_soc.h"
-#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
-
static uint cam_min_camnoc_ib_bw;
module_param(cam_min_camnoc_ib_bw, uint, 0644);
@@ -82,8 +77,8 @@
if (level == bus_client->curr_vote_level)
return 0;
- CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
- level);
+ CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+ bus_client->client_id, bus_client->name, level);
msm_bus_scale_client_update_request(bus_client->client_id, level);
bus_client->curr_vote_level = level;
@@ -152,8 +147,8 @@
path->vectors[0].ab = ab;
path->vectors[0].ib = ib;
- CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
- bus_client->client_id, ab, ib, idx);
+ CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+ bus_client->client_id, bus_client->name, ab, ib, idx);
msm_bus_scale_client_update_request(bus_client->client_id, idx);
return 0;
@@ -208,10 +203,12 @@
bus_client->num_paths = pdata->usecase[0].num_paths;
bus_client->curr_vote_level = 0;
bus_client->valid = true;
+ bus_client->name = pdata->name;
mutex_init(&bus_client->lock);
- CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
- bus_client->src, bus_client->dst, bus_client->client_id);
+ CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+ bus_client->client_id, bus_client->name,
+ bus_client->src, bus_client->dst);
return 0;
fail_unregister_client:
@@ -463,6 +460,7 @@
{
struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
int reg_base_index = cpas_core->regbase_index[reg_base];
uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
int rc = 0;
@@ -478,9 +476,12 @@
return -EINVAL;
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto unlock_client;
}
@@ -503,6 +504,7 @@
{
struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
int reg_base_index = cpas_core->regbase_index[reg_base];
uint32_t reg_value;
uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
@@ -522,9 +524,12 @@
return -EINVAL;
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto unlock_client;
}
@@ -580,8 +585,9 @@
soc_private->camnoc_axi_clk_bw_margin) / 100;
if ((required_camnoc_bw > 0) &&
- (required_camnoc_bw < CAM_CPAS_AXI_MIN_CAMNOC_IB_BW))
- required_camnoc_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+ (required_camnoc_bw <
+ soc_private->camnoc_axi_min_ib_bw))
+ required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
@@ -695,6 +701,7 @@
{
struct cam_axi_vote axi_vote;
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
int rc = 0;
@@ -719,16 +726,20 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto unlock_client;
}
CAM_DBG(CAM_CPAS,
- "Client[%d] Requested compressed[%llu], uncompressed[%llu]",
- client_indx, axi_vote.compressed_bw,
+ "Client=[%d][%s][%d] Requested compressed[%llu], uncompressed[%llu]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, axi_vote.compressed_bw,
axi_vote.uncompressed_bw);
rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
@@ -809,7 +820,8 @@
mutex_lock(&ahb_bus_client->lock);
cpas_client->ahb_level = required_level;
- CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
+ CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+ ahb_bus_client->client_id, ahb_bus_client->name,
required_level, ahb_bus_client->curr_vote_level);
if (required_level == ahb_bus_client->curr_vote_level)
@@ -853,6 +865,7 @@
{
struct cam_ahb_vote ahb_vote;
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
int rc = 0;
@@ -875,17 +888,21 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto unlock_client;
}
CAM_DBG(CAM_CPAS,
- "client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
- client_indx, ahb_vote.type, ahb_vote.vote.level,
- ahb_vote.vote.freq,
+ "client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, ahb_vote.type,
+ ahb_vote.vote.level, ahb_vote.vote.freq,
cpas_core->cpas_client[client_indx]->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
@@ -948,32 +965,37 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+ client_indx);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto done;
}
- cpas_client = cpas_core->cpas_client[client_indx];
-
- CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
- client_indx, ahb_vote->type, ahb_vote->vote.level,
- cpas_client->ahb_level);
+ CAM_DBG(CAM_CPAS,
+ "AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index,
+ ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
ahb_vote, &applied_level);
if (rc)
goto done;
CAM_DBG(CAM_CPAS,
- "AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
- client_indx, axi_vote->compressed_bw,
+ "AXI client=[%d][%s][%d] compressed_bw[%llu], uncompressed_bw[%llu]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
cpas_client, axi_vote);
@@ -1010,9 +1032,9 @@
cpas_client->started = true;
cpas_core->streamon_clients++;
- CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
- soc_private->client_name[client_indx],
- cpas_core->streamon_clients);
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->streamon_clients);
done:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
mutex_unlock(&cpas_hw->hw_mutex);
@@ -1062,18 +1084,20 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
- CAM_DBG(CAM_CPAS, "client=%s, streamon_clients=%d",
- soc_private->client_name[client_indx],
- cpas_core->streamon_clients);
+ CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->streamon_clients);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
+ CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto done;
}
- cpas_client = cpas_core->cpas_client[client_indx];
cpas_client->started = false;
cpas_core->streamon_clients--;
@@ -1207,8 +1231,9 @@
cpas_client, client_indx);
if (rc) {
CAM_ERR(CAM_CPAS,
- "axi_port_insert failed client_indx=%d, rc=%d",
- client_indx, rc);
+ "axi_port_insert failed Client=[%d][%s][%d], rc=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, rc);
kfree(cpas_client);
mutex_unlock(&cpas_hw->hw_mutex);
return -EINVAL;
@@ -1223,8 +1248,9 @@
mutex_unlock(&cpas_hw->hw_mutex);
- CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
- client_indx, cpas_core->registered_clients);
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->registered_clients);
return 0;
}
@@ -1233,6 +1259,7 @@
uint32_t client_handle)
{
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
int rc = 0;
@@ -1241,15 +1268,20 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
+ CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] not registered",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
+ CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
rc = -EPERM;
goto done;
}
@@ -1257,8 +1289,9 @@
cam_cpas_util_remove_client_from_axi_port(
cpas_core->cpas_client[client_indx]);
- CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
- client_indx, cpas_core->registered_clients);
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->registered_clients);
kfree(cpas_core->cpas_client[client_indx]);
cpas_core->cpas_client[client_indx] = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 2e660b1..d51b152 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -20,6 +20,11 @@
#define CAM_CPAS_MAX_CLIENTS 30
#define CAM_CPAS_INFLIGHT_WORKS 5
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000LL)
+
#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
@@ -118,6 +123,7 @@
* @dyn_vote: Whether dynamic voting enabled
* @lock: Mutex lock used while voting on this client
* @valid: Whether bus client is valid
+ * @name: Name of the bus client
*
*/
struct cam_cpas_bus_client {
@@ -131,6 +137,7 @@
bool dyn_vote;
struct mutex lock;
bool valid;
+ const char *name;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 8f9ec14..b73b32a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -56,6 +56,27 @@
CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_private->hw_version);
+ soc_private->camnoc_axi_min_ib_bw = 0;
+ rc = of_property_read_u64(of_node,
+ "camnoc-axi-min-ib-bw",
+ &soc_private->camnoc_axi_min_ib_bw);
+ if (rc == -EOVERFLOW) {
+ soc_private->camnoc_axi_min_ib_bw = 0;
+ rc = of_property_read_u32(of_node,
+ "camnoc-axi-min-ib-bw",
+ (u32 *)&soc_private->camnoc_axi_min_ib_bw);
+ }
+
+ if (rc) {
+ CAM_DBG(CAM_CPAS,
+ "failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+ soc_private->camnoc_axi_min_ib_bw =
+ CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+ }
+
+ CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+ soc_private->camnoc_axi_min_ib_bw);
+
soc_private->client_id_based = of_property_read_bool(of_node,
"client-id-based");
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index 91e8d0c0..f6ae8a8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -48,6 +48,7 @@
* @camnoc_bus_width : CAMNOC Bus width
* @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
* camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
*
*/
struct cam_cpas_private_soc {
@@ -66,6 +67,7 @@
bool control_camnoc_axi_clk;
uint32_t camnoc_bus_width;
uint32_t camnoc_axi_clk_bw_margin;
+ uint64_t camnoc_axi_min_ib_bw;
};
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f9985eb..50cdc7d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1325,9 +1325,7 @@
struct list_head flush_list;
INIT_LIST_HEAD(&flush_list);
- spin_lock_bh(&ctx->lock);
if (list_empty(req_list)) {
- spin_unlock_bh(&ctx->lock);
CAM_DBG(CAM_ISP, "request list is empty");
return 0;
}
@@ -1346,7 +1344,6 @@
list_del_init(&req->list);
list_add_tail(&req->list, &flush_list);
}
- spin_unlock_bh(&ctx->lock);
list_for_each_entry_safe(req, req_temp, &flush_list, list) {
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
@@ -1364,16 +1361,16 @@
req_isp->fence_map_out[i].sync_id = -1;
}
}
- spin_lock_bh(&ctx->lock);
list_add_tail(&req->list, &ctx->free_req_list);
- spin_unlock_bh(&ctx->lock);
}
if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
- !cancel_req_id_found)
- CAM_DBG(CAM_ISP,
+ !cancel_req_id_found) {
+ CAM_INFO(CAM_ISP,
"Flush request id:%lld is not found in the list",
flush_req->req_id);
+ return -EINVAL;
+ }
return 0;
}
@@ -1385,7 +1382,9 @@
int rc = 0;
CAM_DBG(CAM_ISP, "try to flush pending list");
+ spin_lock_bh(&ctx->lock);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+ spin_unlock_bh(&ctx->lock);
CAM_DBG(CAM_ISP, "Flush request in top state %d",
ctx->state);
return rc;
@@ -1399,13 +1398,17 @@
struct cam_isp_context *ctx_isp;
ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
- spin_lock_bh(&ctx->lock);
- ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
- ctx_isp->frame_skip_count = 2;
- spin_unlock_bh(&ctx->lock);
CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+ /* Only if the request is found in the pending queue, move to flush state */
+ if (!rc) {
+ spin_lock_bh(&ctx->lock);
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
+ ctx_isp->frame_skip_count = 2;
+ spin_unlock_bh(&ctx->lock);
+ }
return rc;
}
@@ -1416,10 +1419,10 @@
int rc = 0;
CAM_DBG(CAM_ISP, "try to flush pending list");
+ spin_lock_bh(&ctx->lock);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
/* if nothing is in pending req list, change state to acquire*/
- spin_lock_bh(&ctx->lock);
if (list_empty(&ctx->pending_req_list))
ctx->state = CAM_CTX_ACQUIRED;
spin_unlock_bh(&ctx->lock);
@@ -1995,8 +1998,9 @@
flush_req.dev_hdl = ctx->dev_hdl;
CAM_DBG(CAM_ISP, "try to flush pending list");
+ spin_lock_bh(&ctx->lock);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
-
+ spin_unlock_bh(&ctx->lock);
ctx->state = CAM_CTX_AVAILABLE;
trace_cam_context_state("ISP", ctx);
@@ -2381,7 +2385,9 @@
ctx_isp->active_req_cnt = 0;
ctx_isp->reported_req_id = 0;
ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
- CAM_ISP_CTX_ACTIVATED_APPLIED : CAM_ISP_CTX_ACTIVATED_SOF;
+ CAM_ISP_CTX_ACTIVATED_APPLIED :
+ (req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+ CAM_ISP_CTX_ACTIVATED_SOF;
/*
* Only place to change state before calling the hw due to
@@ -2399,6 +2405,11 @@
goto end;
}
CAM_DBG(CAM_ISP, "start device success");
+
+ if (req_isp->num_fence_map_out) {
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+ }
end:
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 38a4497..12c37863 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -210,7 +210,8 @@
int rc = -1;
struct cam_hw_intf *hw_intf;
- for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ /* Start slave (which is right split) first */
+ for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
if (!isp_hw_res->hw_res[i])
continue;
hw_intf = isp_hw_res->hw_res[i]->hw_intf;
@@ -959,6 +960,7 @@
{
int rc = -1;
int i;
+ int master_idx = -1;
struct cam_ife_hw_mgr *ife_hw_mgr;
struct cam_ife_hw_mgr_res *csid_res;
@@ -1010,18 +1012,27 @@
if (!cid_res->hw_res[i])
continue;
- csid_acquire.node_res = NULL;
- if (csid_res->is_dual_vfe) {
- if (i == CAM_ISP_HW_SPLIT_LEFT)
- csid_acquire.sync_mode =
- CAM_ISP_HW_SYNC_MASTER;
- else
- csid_acquire.sync_mode =
- CAM_ISP_HW_SYNC_SLAVE;
- }
-
hw_intf = ife_hw_mgr->csid_devices[
cid_res->hw_res[i]->hw_intf->hw_idx];
+
+ csid_acquire.node_res = NULL;
+ if (csid_res->is_dual_vfe) {
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ master_idx = hw_intf->hw_idx;
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_MASTER;
+ } else {
+ if (master_idx == -1) {
+ CAM_ERR(CAM_ISP,
+ "No Master found");
+ goto err;
+ }
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_SLAVE;
+ csid_acquire.master_idx = master_idx;
+ }
+ }
+
rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
&csid_acquire, sizeof(csid_acquire));
if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index d20450c..3edae4a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -851,7 +851,6 @@
path_data->cid = reserve->cid;
path_data->in_format = reserve->in_port->format;
path_data->out_format = reserve->out_port->format;
- path_data->master_idx = reserve->master_idx;
path_data->sync_mode = reserve->sync_mode;
path_data->height = reserve->in_port->height;
path_data->start_line = reserve->in_port->line_start;
@@ -877,9 +876,11 @@
goto end;
}
- CAM_DBG(CAM_ISP, "Res id: %d height:%d line_start %d line_stop %d",
+ CAM_DBG(CAM_ISP,
+ "Res id: %d height:%d line_start %d line_stop %d crop_en %d",
reserve->res_id, reserve->in_port->height,
- reserve->in_port->line_start, reserve->in_port->line_stop);
+ reserve->in_port->line_start, reserve->in_port->line_stop,
+ path_data->crop_enable);
if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
@@ -893,20 +894,23 @@
path_data->start_pixel = reserve->in_port->left_start;
path_data->end_pixel = reserve->in_port->left_stop;
path_data->width = reserve->in_port->left_width;
- CAM_DBG(CAM_ISP, "CSID:%dmaster:startpixel 0x%x endpixel:0x%x",
+ CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
csid_hw->hw_intf->hw_idx, path_data->start_pixel,
path_data->end_pixel);
- CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+ CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
csid_hw->hw_intf->hw_idx, path_data->start_line,
path_data->end_line);
} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+ path_data->master_idx = reserve->master_idx;
+ CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+ csid_hw->hw_intf->hw_idx, path_data->master_idx);
path_data->start_pixel = reserve->in_port->right_start;
path_data->end_pixel = reserve->in_port->right_stop;
path_data->width = reserve->in_port->right_width;
CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
csid_hw->hw_intf->hw_idx, path_data->start_pixel,
path_data->end_pixel, path_data->width);
- CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+ CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
csid_hw->hw_intf->hw_idx, path_data->start_line,
path_data->end_line);
} else {
@@ -1431,19 +1435,6 @@
cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
- /*Set master or slave IPP */
- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
- /*Set halt mode as master */
- val = CSID_HALT_MODE_MASTER << 2;
- else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
- /*Set halt mode as slave and set master idx */
- val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
- else
- /* Default is internal halt mode */
- val = 0;
-
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_ctrl_addr);
/* Enable the IPP path */
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
@@ -1545,19 +1536,31 @@
CAM_DBG(CAM_ISP, "Enable IPP path");
- /* Resume at frame boundary */
- if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+ /* Set master or slave IPP */
+ if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+ /*Set halt mode as master */
+ val = CSID_HALT_MODE_MASTER << 2;
+ else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+ /*Set halt mode as slave and set master idx */
+ val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
+ else
+ /* Default is internal halt mode */
+ val = 0;
+
+ /*
+ * Resume at frame boundary if Master or No Sync.
+ * Slave will get resume command from Master.
+ */
+ if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+ path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_ctrl_addr);
- } else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
- cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
- soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_ctrl_addr);
- }
- /* for slave mode, not need to resume for slave device */
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+ CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
/* Enable the required ipp interrupts */
val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
@@ -1569,6 +1572,7 @@
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+ CAM_DBG(CAM_ISP, "Enable IPP IRQ mask 0x%x", val);
res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -2618,38 +2622,46 @@
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_EOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL0_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_EOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL1_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_EOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL2_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_EOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL3_EOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
}
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_SOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL0_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_SOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL1_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_SOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL2_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
- CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_SOT_CAPTURED",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL3_SOT_CAPTURED",
csid_hw->hw_intf->hw_idx);
}
}
@@ -2709,16 +2721,17 @@
if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
- CAM_ERR(CAM_ISP, "CSID:%d IPP SOF received",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
csid_hw->hw_intf->hw_idx);
if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
- CAM_ERR(CAM_ISP, "CSID:%d IPP EOF received",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
csid_hw->hw_intf->hw_idx);
if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d IPP fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop IPP path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2736,14 +2749,17 @@
if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
- CAM_ERR(CAM_ISP, "CSID RDI:%d SOF received", i);
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID RDI:%d SOF received", i);
if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF) &&
(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
- CAM_ERR(CAM_ISP, "CSID RDI:%d EOF received", i);
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID RDI:%d EOF received", i);
if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d RDI fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop RDI path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index a4ba2e1..984adf7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 734cbdb..90c8006 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -138,6 +138,9 @@
camif_data->first_line = acquire_data->vfe_in.in_port->line_start;
camif_data->last_line = acquire_data->vfe_in.in_port->line_stop;
+ CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+ camif_res->hw_intf->hw_idx,
+ camif_data->pix_pattern, camif_data->dsp_mode);
return rc;
}
@@ -249,6 +252,8 @@
/* Reg Update */
cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+ CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+ rsrc_data->reg_data->reg_update_cmd_data);
CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
return 0;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index adfac57..ff18fa74 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1447,6 +1447,8 @@
if (idx < 0) {
CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
flush_info->req_id);
+ mutex_unlock(&link->req.lock);
+ return -EINVAL;
} else {
CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
flush_info->req_id, idx);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index fe69fcb..b47f4f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -205,8 +205,6 @@
CAM_INFO(CAM_CCI, "****CCI MASTER %d Registers ****",
master);
for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
- if (i == 6)
- continue;
reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
read_val = cam_io_r_mb(base + reg_offset);
CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
@@ -868,6 +866,180 @@
return rc;
}
+static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
+ struct cam_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t val = 0, i = 0;
+ unsigned long rem_jiffies;
+ int32_t read_words = 0, exp_words = 0;
+ int32_t index = 0, first_byte = 0, total_read_words = 0;
+ enum cci_i2c_master_t master;
+ enum cci_i2c_queue_t queue = QUEUE_1;
+ struct cci_device *cci_dev = NULL;
+ struct cam_cci_read_cfg *read_cfg = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+ if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+ || c_ctrl->cci_info->cci_i2c_master < 0) {
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+ return -EINVAL;
+ }
+
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+ mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+ /*
+ * Todo: If there is a change in frequency of operation
+ * Wait for previous transaction to complete
+ */
+
+ /* Set the I2C Frequency */
+ rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+ goto rel_mutex;
+ }
+
+ /*
+ * Call validate queue to make sure queue is empty before starting.
+ * If this call fails, don't proceed with i2c_read call. This is to
+ * avoid overflow / underflow of queue
+ */
+ rc = cam_cci_validate_queue(cci_dev,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+ master, queue);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
+ goto rel_mutex;
+ }
+
+ if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+ CAM_ERR(CAM_CCI, "More than max retries");
+ rc = -EINVAL; goto rel_mutex;
+ }
+
+ if (read_cfg->data == NULL) {
+ CAM_ERR(CAM_CCI, "Data ptr is NULL");
+ rc = -EINVAL; goto rel_mutex;
+ }
+
+ if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+ CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+ read_cfg->addr_type);
+ rc = -EINVAL;
+ goto rel_mutex;
+ }
+
+ CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+ c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+ c_ctrl->cci_info->id_map);
+ val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+ c_ctrl->cci_info->retries << 16 |
+ c_ctrl->cci_info->id_map << 18;
+ rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+ goto rel_mutex;
+ }
+
+ val = CCI_I2C_LOCK_CMD;
+ rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+ goto rel_mutex;
+ }
+
+ val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+ for (i = 0; i < read_cfg->addr_type; i++) {
+ val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+ ((read_cfg->addr_type - i) << 3);
+ }
+
+ rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+ goto rel_mutex;
+ }
+
+ val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+ rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+ goto rel_mutex;
+ }
+
+ val = CCI_I2C_UNLOCK_CMD;
+ rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+ goto rel_mutex;
+ }
+
+ val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+ CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+
+ val = 1 << ((master * 2) + queue);
+ cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+ exp_words = ((read_cfg->num_byte / 4) + 1);
+
+ while (exp_words != total_read_words) {
+ rem_jiffies = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ val = cam_io_r_mb(base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+ master * 0x100);
+ CAM_ERR(CAM_CCI,
+ "wait_for_completion_timeout rc = %d FIFO buf_lvl:0x%x",
+ rc, val);
+#ifdef DUMP_CCI_REGISTERS
+ cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+ cam_cci_flush_queue(cci_dev, master);
+ goto rel_mutex;
+ }
+
+ read_words = cam_io_r_mb(base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+ total_read_words += read_words;
+ do {
+ val = cam_io_r_mb(base +
+ CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+ for (i = 0; (i < 4) &&
+ (index < read_cfg->num_byte); i++) {
+ CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+ if (!first_byte) {
+ CAM_DBG(CAM_CCI, "sid 0x%x",
+ val & 0xFF);
+ first_byte++;
+ } else {
+ read_cfg->data[index] =
+ (val >> (i * 8)) & 0xFF;
+ CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+ read_cfg->data[index]);
+ index++;
+ }
+ }
+ } while (--read_words > 0);
+ }
+
+rel_mutex:
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+ return rc;
+}
+
static int32_t cam_cci_read(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl)
{
@@ -1004,7 +1176,11 @@
#endif
if (rc == 0)
rc = -ETIMEDOUT;
- CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
+ val = cam_io_r_mb(base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+ CAM_ERR(CAM_CCI,
+ "wait_for_completion_timeout rc = %d FIFO buf_lvl: 0x%x",
+ rc, val);
cam_cci_flush_queue(cci_dev, master);
goto rel_mutex;
} else {
@@ -1224,19 +1400,26 @@
read_bytes = read_cfg->num_byte;
do {
- if (read_bytes > CCI_READ_MAX)
- read_cfg->num_byte = CCI_READ_MAX;
+ if (read_bytes > CCI_I2C_MAX_BYTE_COUNT)
+ read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
else
read_cfg->num_byte = read_bytes;
- rc = cam_cci_read(sd, c_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_CCI, "failed rc %d", rc);
+
+ if (read_cfg->num_byte > CCI_READ_MAX)
+ rc = cam_cci_burst_read(sd, c_ctrl);
+ else
+ rc = cam_cci_read(sd, c_ctrl);
+
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
goto ERROR;
}
- if (read_bytes > CCI_READ_MAX) {
- read_cfg->addr += CCI_READ_MAX;
- read_cfg->data += CCI_READ_MAX;
- read_bytes -= CCI_READ_MAX;
+
+ if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) {
+ read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
+ read_cfg->data_type);
+ read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
+ read_bytes -= CCI_I2C_MAX_BYTE_COUNT;
} else {
read_bytes = 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index da08bc7..c8ca85d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -58,18 +58,23 @@
irqreturn_t cam_cci_irq(int irq_num, void *data)
{
- uint32_t irq;
+ uint32_t irq_status0 = 0;
+ uint32_t irq_status1 = 0;
struct cci_device *cci_dev = data;
struct cam_hw_soc_info *soc_info =
&cci_dev->soc_info;
void __iomem *base = soc_info->reg_map[0].mem_base;
unsigned long flags;
+ bool burst_read_assert = false;
- irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
- cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+ irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+ irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
+ cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
- if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+ CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+ if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
cci_dev->cci_master_info[MASTER_0].reset_pending =
FALSE;
@@ -83,11 +88,24 @@
&cci_dev->cci_master_info[MASTER_1].reset_complete);
}
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+
+ if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+ (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
+ cci_dev->cci_master_info[MASTER_0].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+ burst_read_assert = true;
+ }
+ if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+ (!burst_read_assert)) {
cci_dev->cci_master_info[MASTER_0].status = 0;
complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+ if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+ (!burst_read_assert)) {
+ cci_dev->cci_master_info[MASTER_0].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+ }
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -104,7 +122,7 @@
&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
flags);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_0];
@@ -121,11 +139,23 @@
&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
flags);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+ if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+ (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
+ cci_dev->cci_master_info[MASTER_1].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+ burst_read_assert = true;
+ }
+ if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+ (!burst_read_assert)) {
cci_dev->cci_master_info[MASTER_1].status = 0;
complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+ if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+ (!burst_read_assert)) {
+ cci_dev->cci_master_info[MASTER_1].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+ }
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -142,7 +172,7 @@
&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
flags);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_1];
@@ -159,27 +189,27 @@
&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
flags);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
cam_io_w_mb(CCI_M0_RESET_RMSK,
base + CCI_RESET_CMD_ADDR);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
cam_io_w_mb(CCI_M1_RESET_RMSK,
base + CCI_RESET_CMD_ADDR);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
base + CCI_HALT_REQ_ADDR);
- CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq);
+ CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq_status0);
}
- if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+ if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
base + CCI_HALT_REQ_ADDR);
- CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq);
+ CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
}
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 7cde619..d48ffd1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -65,19 +65,14 @@
#define MAX_LRME_V4l2_EVENTS 30
/* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
+#define CCI_READ_MAX 256
#define CCI_I2C_READ_MAX_RETRIES 3
#define CCI_I2C_MAX_READ 8192
#define CCI_I2C_MAX_WRITE 8192
+#define CCI_I2C_MAX_BYTE_COUNT 65535
#define CAMX_CCI_DEV_NAME "cam-cci-driver"
-/* Max bytes that can be read per CCI read transaction */
-#define CCI_READ_MAX 12
-#define CCI_I2C_READ_MAX_RETRIES 3
-#define CCI_I2C_MAX_READ 8192
-#define CCI_I2C_MAX_WRITE 8192
-
#define PRIORITY_QUEUE (QUEUE_0)
#define SYNC_QUEUE (QUEUE_1)
@@ -125,6 +120,7 @@
uint16_t addr_type;
uint8_t *data;
uint16_t num_byte;
+ uint16_t data_type;
};
struct cam_cci_i2c_queue_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
index c18593e..31c8e26 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -43,27 +43,36 @@
#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310
#define CCI_IRQ_MASK_0_ADDR 0x00000c04
#define CCI_IRQ_MASK_0_RMSK 0x7fff7ff7
+#define CCI_IRQ_MASK_1_ADDR 0x00000c10
+#define CCI_IRQ_MASK_1_RMSK 0x00110000
#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08
+#define CCI_IRQ_CLEAR_1_ADDR 0x00000c14
#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c
+#define CCI_IRQ_STATUS_1_ADDR 0x00000c18
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000
#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD 0x100000
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD 0x10000
+#define CCI_I2C_M0_RD_THRESHOLD_ADDR 0x00000120
+#define CCI_I2C_M1_RD_THRESHOLD_ADDR 0x00000220
+#define CCI_I2C_RD_THRESHOLD_VALUE 0x38
#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
#define DEBUG_TOP_REG_START 0x0
#define DEBUG_TOP_REG_COUNT 14
#define DEBUG_MASTER_REG_START 0x100
-#define DEBUG_MASTER_REG_COUNT 8
+#define DEBUG_MASTER_REG_COUNT 9
#define DEBUG_MASTER_QUEUE_REG_START 0x300
-#define DEBUG_MASTER_QUEUE_REG_COUNT 6
+#define DEBUG_MASTER_QUEUE_REG_COUNT 7
#define DEBUG_INTR_REG_START 0xC00
#define DEBUG_INTR_REG_COUNT 7
#endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 295259d..e0b27ca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -146,6 +146,10 @@
base + CCI_IRQ_MASK_0_ADDR);
cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+ base + CCI_IRQ_MASK_1_ADDR);
+ cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+ base + CCI_IRQ_CLEAR_1_ADDR);
cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
for (i = 0; i < MASTER_MAX; i++) {
@@ -157,6 +161,13 @@
flush_workqueue(cci_dev->write_wq[i]);
}
}
+
+ /* Set RD FIFO threshold for M0 & M1 */
+ cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+ base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
+ cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+ base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
+
cci_dev->cci_state = CCI_STATE_ENABLED;
return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 30b9d96..6523607 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -109,6 +109,7 @@
rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
emap[j].mem.addr, memptr,
emap[j].mem.addr_type,
+ emap[j].mem.data_type,
emap[j].mem.valid_size);
if (rc) {
CAM_ERR(CAM_EEPROM, "read failed rc %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 2c1f520..1ec7d9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@
cci_ctrl.cci_info = cci_client;
cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
rc = v4l2_subdev_call(cci_client->cci_subdev,
@@ -59,6 +60,7 @@
int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
uint32_t addr, uint8_t *data,
enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
uint32_t num_byte)
{
int32_t rc = -EFAULT;
@@ -67,6 +69,7 @@
struct cam_cci_ctrl cci_ctrl;
if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
|| (num_byte > I2C_REG_DATA_MAX)) {
CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
num_byte);
@@ -81,6 +84,7 @@
cci_ctrl.cci_info = cci_client;
cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
cci_ctrl.status = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 7cddcf9..e79fffb 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
* @addr: I2c address
* @data: I2C data
* @addr_type: I2c address type
+ * @data_type: I2c data type
* @num_byte: number of bytes
*
* This API handles CCI sequential read
@@ -53,6 +54,7 @@
int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
uint32_t addr, uint8_t *data,
enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
uint32_t num_byte);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 89aad4e..ed490fd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -69,11 +69,12 @@
int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
uint32_t addr, uint8_t *data,
- enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type, int32_t num_bytes)
{
if (io_master_info->master_type == CCI_MASTER) {
return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
- addr, data, addr_type, num_bytes);
+ addr, data, addr_type, data_type, num_bytes);
} else if (io_master_info->master_type == I2C_MASTER) {
return cam_qup_i2c_read_seq(io_master_info->client,
addr, data, addr_type, num_bytes);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index ec5ed25..f1143c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
* @io_master_info: I2C/SPI master information
* @addr: I2C address
* @data: I2C data
+ * @addr_type: I2C addr type
* @data_type: I2C data type
* @num_bytes: number of bytes
*
@@ -60,6 +61,7 @@
int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
uint32_t addr, uint8_t *data,
enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
int32_t num_bytes);
/**
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index ba32526..dfcb73a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -229,7 +229,9 @@
static int msm_csid_reset(struct csid_device *csid_dev)
{
int32_t rc = 0;
+ uint32_t irq = 0, irq_bitshift;
+ irq_bitshift = csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift;
msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
csid_dev->base +
csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
@@ -238,8 +240,23 @@
if (rc < 0) {
pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
rc);
+ } else if (rc == 0) {
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 << irq_bitshift)) {
+ rc = 1;
+ CDBG("%s succeeded", __func__);
+ } else {
+ rc = 0;
+ pr_err("%s reset csid_irq_status failed = 0x%x\n",
+ __func__, irq);
+ }
if (rc == 0)
rc = -ETIMEDOUT;
+ } else {
+ CDBG("%s succeeded", __func__);
}
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 9a7d272..3679c59 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2275,6 +2275,7 @@
enable.enable = 0;
pdata = &enable;
inst->clk_data.low_latency_mode = (bool) enable.enable;
+ msm_dcvs_try_enable(inst);
break;
}
case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 6ae030f..a665978 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -609,6 +609,7 @@
struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
u64 rate = 0;
struct clock_data *dcvs = NULL;
+ u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7;
core = inst->core;
dcvs = &inst->clk_data;
@@ -631,8 +632,19 @@
vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
- /* 10 / 7 is overhead factor */
- vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
+ operating_rate = inst->clk_data.operating_rate >> 16;
+ if (operating_rate > inst->prop.fps && inst->prop.fps) {
+ vsp_factor_num *= operating_rate;
+ vsp_factor_den *= inst->prop.fps;
+ }
+ // Adjust the factor for the 2-core case: the workload is not
+ // distributed equally across the 2 cores, so use 0.65 instead of 0.5.
+ if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ vsp_factor_num = vsp_factor_num * 13 / 10;
+ vsp_factor_den *= 2;
+ }
+ vsp_cycles += ((u64)inst->clk_data.bitrate * vsp_factor_num) /
+ vsp_factor_den;
} else if (inst->session_type == MSM_VIDC_DECODER) {
vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
diff --git a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
index b15baaa..1de5bd1 100644
--- a/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc_3x/hfi_packetization.c
@@ -658,9 +658,12 @@
case HAL_EXTRADATA_STREAM_USERDATA:
ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
break;
- case HAL_EXTRADATA_FRAME_QP:
+ case HAL_EXTRADATA_DEC_FRAME_QP:
ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
break;
+ case HAL_EXTRADATA_ENC_FRAME_QP:
+ ret = HFI_PROPERTY_PARAM_VENC_FRAME_QP_EXTRADATA;
+ break;
case HAL_EXTRADATA_FRAME_BITS_INFO:
ret = HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA;
break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index d129dc2..ef6e360 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -823,7 +823,7 @@
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -846,7 +846,8 @@
(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
- (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO) |
+ (1ULL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -1564,6 +1565,7 @@
case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+ case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
inst->fmts[CAPTURE_PORT].num_planes = 2;
default:
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index bd58117..502a5c7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -77,9 +77,11 @@
"Extradata output crop",
"Extradata display colour SEI",
"Extradata light level SEI",
+ "Extradata PQ Info",
"Extradata display VUI",
"Extradata vpx color space",
- "Extradata PQ Info",
+ "Extradata UBWC CR stats info",
+ "Extradata enc frame QP"
};
struct getprop_buf {
@@ -4727,7 +4729,10 @@
ret = HAL_EXTRADATA_STREAM_USERDATA;
break;
case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
- ret = HAL_EXTRADATA_FRAME_QP;
+ ret = HAL_EXTRADATA_DEC_FRAME_QP;
+ break;
+ case V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP:
+ ret = HAL_EXTRADATA_ENC_FRAME_QP;
break;
case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
ret = HAL_EXTRADATA_FRAME_BITS_INFO;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
index 93368f6..c7eb5f1 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
@@ -316,7 +316,7 @@
s32 maximum;
s32 default_value;
u32 step;
- u32 menu_skip_mask;
+ u64 menu_skip_mask;
u32 flags;
const char * const *qmenu;
};
diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
index 1a25a58..875db09 100644
--- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
@@ -112,7 +112,8 @@
HAL_EXTRADATA_ASPECT_RATIO,
HAL_EXTRADATA_MPEG2_SEQDISP,
HAL_EXTRADATA_STREAM_USERDATA,
- HAL_EXTRADATA_FRAME_QP,
+ HAL_EXTRADATA_DEC_FRAME_QP,
+ HAL_EXTRADATA_ENC_FRAME_QP,
HAL_EXTRADATA_FRAME_BITS_INFO,
HAL_EXTRADATA_INPUT_CROP,
HAL_EXTRADATA_DIGITAL_ZOOM,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3f4c39..a233173 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -825,4 +825,5 @@
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/fpr_FingerprintCard/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f1c9467..8e5d0f6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,6 +57,7 @@
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o
+obj-$(CONFIG_FPR_FPC) += fpr_FingerprintCard/
obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
diff --git a/drivers/misc/fpr_FingerprintCard/Kconfig b/drivers/misc/fpr_FingerprintCard/Kconfig
new file mode 100644
index 0000000..c9599e6
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Kconfig
@@ -0,0 +1,10 @@
+#
+# FingerprintCard fingerprint driver
+#
+menu "FingerprintCard fingerprint driver"
+config FPR_FPC
+ default n
+ tristate "FPC_BTP fingerprint sensor support"
+ depends on SPI_MASTER
+
+endmenu
diff --git a/drivers/misc/fpr_FingerprintCard/Makefile b/drivers/misc/fpr_FingerprintCard/Makefile
new file mode 100644
index 0000000..96681eb
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/Makefile
@@ -0,0 +1,5 @@
+# Makefile for FingerprintCard fingerprint driver
+
+fpc1020-objs := fpc1020_platform_tee.o
+obj-$(CONFIG_FPR_FPC) += fpc1020.o
+
diff --git a/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
new file mode 100644
index 0000000..887c8eb
--- /dev/null
+++ b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c
@@ -0,0 +1,683 @@
+/*
+ * FPC1020 Fingerprint sensor device driver
+ *
+ * This driver will control the platform resources that the FPC fingerprint
+ * sensor needs to operate. The major things are probing the sensor to check
+ * that it is actually connected and let the Kernel know this and with that also
+ * enabling and disabling of regulators, controlling GPIOs such as sensor reset
+ * line, sensor IRQ line.
+ *
+ * The driver will expose most of its available functionality in sysfs which
+ * enables dynamic control of these features from eg. a user space process.
+ *
+ * The sensor's IRQ events will be pushed to Kernel's event handling system and
+ * are exposed in the drivers event node.
+ *
+ * This driver will NOT send any commands to the sensor it only controls the
+ * electrical parts.
+ *
+ *
+ * Copyright (c) 2015 Fingerprint Cards AB <tech@fingerprints.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License Version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+
+
+#define FPC_TTW_HOLD_TIME 1000
+#define RESET_LOW_SLEEP_MIN_US 5000
+#define RESET_LOW_SLEEP_MAX_US (RESET_LOW_SLEEP_MIN_US + 100)
+#define RESET_HIGH_SLEEP1_MIN_US 100
+#define RESET_HIGH_SLEEP1_MAX_US (RESET_HIGH_SLEEP1_MIN_US + 100)
+#define RESET_HIGH_SLEEP2_MIN_US 5000
+#define RESET_HIGH_SLEEP2_MAX_US (RESET_HIGH_SLEEP2_MIN_US + 100)
+#define PWR_ON_SLEEP_MIN_US 100
+#define PWR_ON_SLEEP_MAX_US (PWR_ON_SLEEP_MIN_US + 900)
+#define NUM_PARAMS_REG_ENABLE_SET 2
+
+#define RELEASE_WAKELOCK_W_V "release_wakelock_with_verification"
+#define RELEASE_WAKELOCK "release_wakelock"
+#define START_IRQS_RECEIVED_CNT "start_irqs_received_counter"
+
+/*
+ * Pin control state names looked up from the device tree at probe time.
+ * Indexes into this table match fpc1020_data.pinctrl_state.
+ */
+static const char * const pctl_names[] = {
+ "fpc1020_reset_reset",
+ "fpc1020_reset_active",
+ "fpc1020_irq_active",
+};
+
+/* Configuration for one regulator supply used by the sensor. */
+struct vreg_config {
+ char *name; /* supply name as requested via devm_regulator_get() */
+ unsigned long vmin; /* minimum voltage in uV */
+ unsigned long vmax; /* maximum voltage in uV */
+ int ua_load; /* expected load current in uA */
+};
+
+/*
+ * Supplies required by the sensor. Indexes into this table match
+ * fpc1020_data.vreg.
+ */
+static const struct vreg_config vreg_conf[] = {
+ { "vdd_ana", 1800000UL, 1800000UL, 6000, },
+ { "vcc_spi", 1800000UL, 1800000UL, 10, },
+ { "vdd_io", 1800000UL, 1800000UL, 6000, },
+};
+
+/*
+ * Driver state for one fpc1020 sensor instance.
+ *
+ * FIX: pinctrl_state and vreg were previously declared as bare double
+ * pointers (struct pinctrl_state ** / struct regulator **) that were
+ * never allocated — devm_kzalloc() leaves them NULL, so the writes
+ * fpc1020->pinctrl_state[i] in probe and fpc1020->vreg[i] in
+ * vreg_setup() dereferenced a NULL pointer. They are fixed-size
+ * arrays indexed by the pctl_names/vreg_conf tables above; all
+ * existing [i] call sites work unchanged.
+ */
+struct fpc1020_data {
+ struct device *dev;
+ struct pinctrl *fingerprint_pinctrl;
+ struct pinctrl_state *pinctrl_state[ARRAY_SIZE(pctl_names)];
+ struct regulator *vreg[ARRAY_SIZE(vreg_conf)];
+ struct wakeup_source ttw_wl; /* touch-to-wake wakelock */
+ struct mutex lock; /* To set/get exported values in sysfs */
+ int irq_gpio;
+ int rst_gpio;
+ int nbr_irqs_received;
+ int nbr_irqs_received_counter_start;
+ bool prepared; /* true when regulators are on */
+ atomic_t wakeup_enabled; /* Used both in ISR and non-ISR */
+};
+
+/*
+ * Enable or disable one of the regulators listed in vreg_conf, looked
+ * up by name. On enable the regulator is lazily acquired, its voltage
+ * and load are configured, and the handle is cached in fpc1020->vreg.
+ * On disable the regulator is turned off (if enabled) and the cached
+ * handle is dropped.
+ *
+ * Returns 0 on success or a negative errno. Caller must hold
+ * fpc1020->lock (all sysfs callers do).
+ */
+static int vreg_setup(struct fpc1020_data *fpc1020, const char *name,
+ bool enable)
+{
+ size_t i;
+ int rc;
+ struct regulator *vreg;
+ struct device *dev = fpc1020->dev;
+
+ /* prefix match against the known supply names */
+ for (i = 0; i < ARRAY_SIZE(vreg_conf); i++) {
+ const char *n = vreg_conf[i].name;
+
+ if (!memcmp(n, name, strlen(n)))
+ goto found;
+ }
+
+ dev_err(dev, "Regulator %s not found\n", name);
+
+ return -EINVAL;
+
+found:
+ vreg = fpc1020->vreg[i];
+ if (enable) {
+ if (!vreg) {
+ vreg = devm_regulator_get(dev, name);
+ if (IS_ERR_OR_NULL(vreg)) {
+ dev_err(dev, "Unable to get %s\n", name);
+ /*
+ * NOTE(review): PTR_ERR(NULL) is 0, so a NULL
+ * return would be reported as success here —
+ * consider returning -EINVAL for NULL.
+ */
+ return PTR_ERR(vreg);
+ }
+ }
+
+ /* fixed-voltage supplies report 0 countable voltages */
+ if (regulator_count_voltages(vreg) > 0) {
+ rc = regulator_set_voltage(vreg, vreg_conf[i].vmin,
+ vreg_conf[i].vmax);
+ if (rc)
+ dev_err(dev,
+ "Unable to set voltage on %s, %d\n",
+ name, rc);
+ }
+
+ rc = regulator_set_load(vreg, vreg_conf[i].ua_load);
+ if (rc < 0)
+ dev_err(dev, "Unable to set current on %s, %d\n",
+ name, rc);
+
+ rc = regulator_enable(vreg);
+ if (rc) {
+ dev_err(dev, "error enabling %s: %d\n", name, rc);
+ /* drop the handle so the next attempt re-acquires it */
+ vreg = NULL;
+ }
+ fpc1020->vreg[i] = vreg;
+ } else {
+ if (vreg) {
+ if (regulator_is_enabled(vreg)) {
+ regulator_disable(vreg);
+ dev_dbg(dev, "disabled %s\n", name);
+ }
+ fpc1020->vreg[i] = NULL;
+ }
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * sysfs node for controlling clocks.
+ *
+ * This is disabled in platform variant of this driver but kept for
+ * backwards compatibility. Only prints a debug print that it is
+ * disabled.
+ */
+static ssize_t clk_enable_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ /* intentionally a no-op: clocks are owned by the TEE in this variant */
+ dev_dbg(dev,
+ "clk_enable sysfs node not enabled in platform driver\n");
+
+ return count;
+}
+static DEVICE_ATTR(clk_enable, 0200, NULL, clk_enable_set);
+
+/*
+ * Will try to select the set of pins (GPIOS) defined in a pin control node of
+ * the device tree named @p name.
+ *
+ * The node can contain several eg. GPIOs that is controlled when selecting it.
+ * The node may activate or deactivate the pins it contains, the action is
+ * defined in the device tree node itself and not here. The states used
+ * internally is fetched at probe time.
+ *
+ * Returns 0 on success, -EINVAL if @p name is not in pctl_names, or the
+ * error from pinctrl_select_state().
+ *
+ * @see pctl_names
+ * @see fpc1020_probe
+ */
+static int select_pin_ctl(struct fpc1020_data *fpc1020, const char *name)
+{
+ size_t i;
+ int rc;
+ struct device *dev = fpc1020->dev;
+
+ for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+ const char *n = pctl_names[i];
+
+ /* prefix match so sysfs input with trailing newline still hits */
+ if (!memcmp(n, name, strlen(n))) {
+ rc = pinctrl_select_state(fpc1020->fingerprint_pinctrl,
+ fpc1020->pinctrl_state[i]);
+ if (rc)
+ dev_err(dev, "cannot select '%s'\n", name);
+ else
+ dev_dbg(dev, "Selected '%s'\n", name);
+ goto exit;
+ }
+ }
+
+ rc = -EINVAL;
+ dev_err(dev, "%s:'%s' not found\n", __func__, name);
+
+exit:
+ return rc;
+}
+
+/*
+ * sysfs node: write a pin control state name (see pctl_names) to select
+ * that state. Returns the byte count on success or a negative errno.
+ */
+static ssize_t pinctl_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+ int rc;
+
+ mutex_lock(&fpc1020->lock);
+ rc = select_pin_ctl(fpc1020, buf);
+ mutex_unlock(&fpc1020->lock);
+
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(pinctl_set, 0200, NULL, pinctl_set);
+
+/*
+ * sysfs node: write "<supply>,e" to enable or "<supply>,d" to disable
+ * one of the supplies in vreg_conf (e.g. "vdd_ana,e").
+ * Returns the byte count on success or a negative errno.
+ */
+static ssize_t regulator_enable_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+ char op;
+ char name[16];
+ int rc;
+ bool enable;
+
+ /* expect exactly two fields: supply name and operation character */
+ if (sscanf(buf, "%15[^,],%c", name, &op) != NUM_PARAMS_REG_ENABLE_SET)
+ return -EINVAL;
+ if (op == 'e')
+ enable = true;
+ else if (op == 'd')
+ enable = false;
+ else
+ return -EINVAL;
+
+ mutex_lock(&fpc1020->lock);
+ rc = vreg_setup(fpc1020, name, enable);
+ mutex_unlock(&fpc1020->lock);
+
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(regulator_enable, 0200, NULL, regulator_enable_set);
+
+/*
+ * Pulse the sensor reset line via pinctrl: active -> reset -> active,
+ * with the sleep intervals required between transitions.
+ * The IRQ GPIO is sampled before and after the pulse (value unused here;
+ * presumably for bring-up observation — no check is performed on it).
+ *
+ * Returns 0 on success or the first failing pinctrl selection's error.
+ */
+static int hw_reset(struct fpc1020_data *fpc1020)
+{
+ int irq_gpio;
+ int rc;
+
+ irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+ rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+
+ if (rc)
+ goto exit;
+
+ usleep_range(RESET_HIGH_SLEEP1_MIN_US, RESET_HIGH_SLEEP1_MAX_US);
+
+ rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+ if (rc)
+ goto exit;
+ usleep_range(RESET_LOW_SLEEP_MIN_US, RESET_LOW_SLEEP_MAX_US);
+
+ rc = select_pin_ctl(fpc1020, "fpc1020_reset_active");
+ if (rc)
+ goto exit;
+ usleep_range(RESET_HIGH_SLEEP2_MIN_US, RESET_HIGH_SLEEP2_MAX_US);
+
+ irq_gpio = gpio_get_value(fpc1020->irq_gpio);
+
+exit:
+ return rc;
+}
+
+/*
+ * sysfs node: write "reset" to pulse the sensor's hardware reset line.
+ * Any other input returns -EINVAL.
+ */
+static ssize_t hw_reset_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc = -EINVAL;
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+ /* prefix match: "reset\n" from echo also matches */
+ if (!memcmp(buf, "reset", strlen("reset"))) {
+ mutex_lock(&fpc1020->lock);
+ rc = hw_reset(fpc1020);
+ mutex_unlock(&fpc1020->lock);
+ } else {
+ return rc;
+ }
+
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(hw_reset, 0200, NULL, hw_reset_set);
+
+/*
+ * Will setup GPIOs, and regulators to correctly initialize the touch sensor to
+ * be ready for work.
+ *
+ * In the correct order according to the sensor spec this function will
+ * enable/disable regulators, and reset line, all to set the sensor in a
+ * correct power on or off state "electrical" wise.
+ *
+ * Note on control flow: the exit_2/exit_1/exit labels live inside the
+ * "!enable" branch, so a failed power-up falls through the matching
+ * power-down steps to undo only what succeeded, and clears "prepared".
+ * Keep this ordering intact when editing.
+ *
+ * @see device_prepare_set
+ * @note This function will not send any commands to the sensor it will only
+ * control it "electrically".
+ */
+static int device_prepare(struct fpc1020_data *fpc1020, bool enable)
+{
+ int rc = 0;
+
+ mutex_lock(&fpc1020->lock);
+ if (enable && !fpc1020->prepared) {
+ fpc1020->prepared = true;
+ /* hold the sensor in reset while supplies come up */
+ select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+ rc = vreg_setup(fpc1020, "vcc_spi", true);
+ if (rc)
+ goto exit;
+
+ rc = vreg_setup(fpc1020, "vdd_io", true);
+ if (rc)
+ goto exit_1;
+
+ rc = vreg_setup(fpc1020, "vdd_ana", true);
+ if (rc)
+ goto exit_2;
+
+ usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+ /* release reset; errors here are intentionally ignored */
+ (void)select_pin_ctl(fpc1020, "fpc1020_reset_active");
+ } else if (!enable && fpc1020->prepared) {
+ rc = 0;
+ (void)select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+
+ usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US);
+
+ /* power down in reverse order of power up */
+ (void)vreg_setup(fpc1020, "vdd_ana", false);
+exit_2:
+ (void)vreg_setup(fpc1020, "vdd_io", false);
+exit_1:
+ (void)vreg_setup(fpc1020, "vcc_spi", false);
+exit:
+ fpc1020->prepared = false;
+ }
+
+ mutex_unlock(&fpc1020->lock);
+
+ return rc;
+}
+
+/*
+ * sysfs node to enable/disable (power up/power down) the touch sensor
+ *
+ * Accepts "enable" or "disable" (prefix match); other input -> -EINVAL.
+ *
+ * @see device_prepare
+ */
+static ssize_t device_prepare_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+ if (!memcmp(buf, "enable", strlen("enable")))
+ rc = device_prepare(fpc1020, true);
+ else if (!memcmp(buf, "disable", strlen("disable")))
+ rc = device_prepare(fpc1020, false);
+ else
+ return -EINVAL;
+
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(device_prepare, 0200, NULL, device_prepare_set);
+
+/**
+ * sysfs node for controlling whether the driver is allowed
+ * to wake up the platform on interrupt.
+ *
+ * Accepts "enable" or "disable"; the flag is read by the IRQ handler.
+ */
+static ssize_t wakeup_enable_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+ ssize_t ret = count;
+
+ mutex_lock(&fpc1020->lock);
+ if (!memcmp(buf, "enable", strlen("enable")))
+ atomic_set(&fpc1020->wakeup_enabled, 1);
+ else if (!memcmp(buf, "disable", strlen("disable")))
+ atomic_set(&fpc1020->wakeup_enabled, 0);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&fpc1020->lock);
+
+ return ret;
+}
+static DEVICE_ATTR(wakeup_enable, 0200, NULL, wakeup_enable_set);
+
+
+/*
+ * sysfs node for controlling the wakelock.
+ *
+ * Commands:
+ * RELEASE_WAKELOCK_W_V - release only if no IRQs arrived since the
+ * counter snapshot was taken
+ * RELEASE_WAKELOCK - release unconditionally
+ * START_IRQS_RECEIVED_CNT - snapshot the received-IRQ counter
+ */
+static ssize_t handle_wakelock_cmd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+ ssize_t ret = count;
+
+ mutex_lock(&fpc1020->lock);
+ if (!memcmp(buf, RELEASE_WAKELOCK_W_V,
+ min(count, strlen(RELEASE_WAKELOCK_W_V)))) {
+ if (fpc1020->nbr_irqs_received_counter_start ==
+ fpc1020->nbr_irqs_received) {
+ __pm_relax(&fpc1020->ttw_wl);
+ } else {
+ /* new IRQs arrived: keep the wakelock held */
+ dev_dbg(dev, "Ignore releasing of wakelock %d != %d",
+ fpc1020->nbr_irqs_received_counter_start,
+ fpc1020->nbr_irqs_received);
+ }
+ } else if (!memcmp(buf, RELEASE_WAKELOCK, min(count,
+ strlen(RELEASE_WAKELOCK)))) {
+ __pm_relax(&fpc1020->ttw_wl);
+ } else if (!memcmp(buf, START_IRQS_RECEIVED_CNT,
+ min(count, strlen(START_IRQS_RECEIVED_CNT)))) {
+ fpc1020->nbr_irqs_received_counter_start =
+ fpc1020->nbr_irqs_received;
+ } else
+ ret = -EINVAL;
+ mutex_unlock(&fpc1020->lock);
+
+ return ret;
+}
+static DEVICE_ATTR(handle_wakelock, 0200, NULL, handle_wakelock_cmd);
+
+/*
+ * sysfs node to check the interrupt status of the sensor; the interrupt
+ * handler performs sysfs_notify to allow userland to poll the node.
+ * Reads back the current level of the IRQ GPIO as "0" or "1".
+ */
+static ssize_t irq_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+ int irq = gpio_get_value(fpc1020->irq_gpio);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", irq);
+}
+
+/*
+ * writing to the irq node will just drop a printk message
+ * and return success, used for latency measurement.
+ */
+static ssize_t irq_ack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpc1020_data *fpc1020 = dev_get_drvdata(dev);
+
+ dev_dbg(fpc1020->dev, "%s\n", __func__);
+
+ return count;
+}
+static DEVICE_ATTR(irq, 0600 | 0200, irq_get, irq_ack);
+
+/* All sysfs attributes exposed by this driver, registered as one group. */
+static struct attribute *attributes[] = {
+ &dev_attr_pinctl_set.attr,
+ &dev_attr_device_prepare.attr,
+ &dev_attr_regulator_enable.attr,
+ &dev_attr_hw_reset.attr,
+ &dev_attr_wakeup_enable.attr,
+ &dev_attr_handle_wakelock.attr,
+ &dev_attr_clk_enable.attr,
+ &dev_attr_irq.attr,
+ NULL
+};
+
+static const struct attribute_group attribute_group = {
+ .attrs = attributes,
+};
+
+/*
+ * Threaded IRQ handler (registered via devm_request_threaded_irq, so
+ * sleeping primitives like mutex_lock are permitted here). When wakeup
+ * is enabled it counts the IRQ and holds the touch-to-wake wakelock for
+ * FPC_TTW_HOLD_TIME ms, then notifies pollers of the "irq" sysfs node.
+ */
+static irqreturn_t fpc1020_irq_handler(int irq, void *handle)
+{
+ struct fpc1020_data *fpc1020 = handle;
+
+ pr_info("fpc1020 irq handler: %s\n", __func__);
+ mutex_lock(&fpc1020->lock);
+ if (atomic_read(&fpc1020->wakeup_enabled)) {
+ fpc1020->nbr_irqs_received++;
+ __pm_wakeup_event(&fpc1020->ttw_wl,
+ msecs_to_jiffies(FPC_TTW_HOLD_TIME));
+ }
+ mutex_unlock(&fpc1020->lock);
+
+ sysfs_notify(&fpc1020->dev->kobj, NULL, dev_attr_irq.attr.name);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Look up a GPIO by its device-tree property name (@label) and request
+ * it via devm (freed automatically on driver detach). On success the
+ * GPIO number is stored in *gpio and 0 is returned; otherwise a
+ * negative errno.
+ */
+static int fpc1020_request_named_gpio(struct fpc1020_data *fpc1020,
+ const char *label, int *gpio)
+{
+ struct device *dev = fpc1020->dev;
+ struct device_node *np = dev->of_node;
+ int rc;
+
+ rc = of_get_named_gpio(np, label, 0);
+
+ if (rc < 0) {
+ dev_err(dev, "failed to get '%s'\n", label);
+ return rc;
+ }
+ *gpio = rc;
+
+ rc = devm_gpio_request(dev, *gpio, label);
+ if (rc) {
+ dev_err(dev, "failed to request gpio %d\n", *gpio);
+ return rc;
+ }
+ dev_dbg(dev, "%s %d\n", label, *gpio);
+
+ return 0;
+}
+
+/*
+ * Probe: acquire GPIOs and pinctrl states from the device tree, put the
+ * sensor in reset, register the threaded IRQ and the sysfs attribute
+ * group, optionally power the sensor on at boot ("fpc,enable-on-boot"),
+ * and finally pulse the reset line.
+ *
+ * NOTE(review): fpc1020->pinctrl_state is declared as an unallocated
+ * double pointer in struct fpc1020_data (zeroed by devm_kzalloc), so
+ * the write "fpc1020->pinctrl_state[i] = state" below dereferences
+ * NULL — the field should be a fixed-size array sized by pctl_names.
+ * NOTE(review): if hw_reset() fails at the end, probe returns its
+ * error without removing the sysfs group or trashing the wakeup
+ * source created above — consider an unwind path.
+ */
+static int fpc1020_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+ size_t i;
+ int irqf;
+ struct fpc1020_data *fpc1020 = devm_kzalloc(dev, sizeof(*fpc1020),
+ GFP_KERNEL);
+ if (!fpc1020) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+
+ fpc1020->dev = dev;
+ platform_set_drvdata(pdev, fpc1020);
+
+ rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_irq",
+ &fpc1020->irq_gpio);
+ if (rc)
+ goto exit;
+ rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_rst",
+ &fpc1020->rst_gpio);
+ if (rc)
+ goto exit;
+
+ fpc1020->fingerprint_pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(fpc1020->fingerprint_pinctrl)) {
+ if (PTR_ERR(fpc1020->fingerprint_pinctrl) == -EPROBE_DEFER) {
+ /* pinctrl provider not bound yet; retry later */
+ dev_info(dev, "pinctrl not ready\n");
+ rc = -EPROBE_DEFER;
+ goto exit;
+ }
+ dev_err(dev, "Target does not use pinctrl\n");
+ fpc1020->fingerprint_pinctrl = NULL;
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* cache every named pinctrl state for later select_pin_ctl() calls */
+ for (i = 0; i < ARRAY_SIZE(pctl_names); i++) {
+ const char *n = pctl_names[i];
+ struct pinctrl_state *state =
+ pinctrl_lookup_state(fpc1020->fingerprint_pinctrl, n);
+ if (IS_ERR(state)) {
+ dev_err(dev, "cannot find '%s'\n", n);
+ rc = -EINVAL;
+ goto exit;
+ }
+ dev_info(dev, "found pin control %s\n", n);
+ fpc1020->pinctrl_state[i] = state;
+ }
+
+ rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset");
+ if (rc)
+ goto exit;
+ rc = select_pin_ctl(fpc1020, "fpc1020_irq_active");
+ if (rc)
+ goto exit;
+
+ atomic_set(&fpc1020->wakeup_enabled, 0);
+
+ irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+ if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) {
+ irqf |= IRQF_NO_SUSPEND;
+ device_init_wakeup(dev, 1);
+ }
+
+ mutex_init(&fpc1020->lock);
+ rc = devm_request_threaded_irq(dev, gpio_to_irq(fpc1020->irq_gpio),
+ NULL, fpc1020_irq_handler, irqf,
+ dev_name(dev), fpc1020);
+ if (rc) {
+ dev_err(dev, "could not request irq %d\n",
+ gpio_to_irq(fpc1020->irq_gpio));
+ goto exit;
+ }
+
+ dev_info(dev, "requested irq %d\n", gpio_to_irq(fpc1020->irq_gpio));
+
+ /* Request that the interrupt should be wakeable */
+ enable_irq_wake(gpio_to_irq(fpc1020->irq_gpio));
+
+ wakeup_source_init(&fpc1020->ttw_wl, "fpc_ttw_wl");
+
+ rc = sysfs_create_group(&dev->kobj, &attribute_group);
+ if (rc) {
+ dev_err(dev, "could not create sysfs\n");
+ goto exit;
+ }
+
+ if (of_property_read_bool(dev->of_node, "fpc,enable-on-boot")) {
+ dev_info(dev, "Enabling hardware\n");
+ /* best effort: failure to power on at boot is not fatal */
+ (void)device_prepare(fpc1020, true);
+ }
+
+ rc = hw_reset(fpc1020);
+
+ dev_info(dev, "%s: ok\n", __func__);
+
+exit:
+ return rc;
+}
+
+/*
+ * Remove: tear down sysfs, destroy the lock and wakeup source, and make
+ * sure all three supplies are off. GPIOs/IRQ/pinctrl are devm-managed
+ * and released automatically.
+ */
+static int fpc1020_remove(struct platform_device *pdev)
+{
+ struct fpc1020_data *fpc1020 = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &attribute_group);
+ mutex_destroy(&fpc1020->lock);
+ wakeup_source_trash(&fpc1020->ttw_wl);
+ (void)vreg_setup(fpc1020, "vdd_ana", false);
+ (void)vreg_setup(fpc1020, "vdd_io", false);
+ (void)vreg_setup(fpc1020, "vcc_spi", false);
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+/* Device-tree match table: binds to nodes with compatible = "fpc,fpc1020". */
+static const struct of_device_id fpc1020_of_match[] = {
+ { .compatible = "fpc,fpc1020", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fpc1020_of_match);
+
+static struct platform_driver fpc1020_driver = {
+ .driver = {
+ .name = "fpc1020",
+ .owner = THIS_MODULE,
+ .of_match_table = fpc1020_of_match,
+ },
+ .probe = fpc1020_probe,
+ .remove = fpc1020_remove,
+};
+
+/* Module init: register the platform driver and log the outcome. */
+static int __init fpc1020_init(void)
+{
+ int rc = platform_driver_register(&fpc1020_driver);
+
+ if (!rc)
+ pr_info("%s OK\n", __func__);
+ else
+ pr_err("%s %d\n", __func__, rc);
+
+ return rc;
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit fpc1020_exit(void)
+{
+ pr_info("%s\n", __func__);
+ platform_driver_unregister(&fpc1020_driver);
+}
+
+module_init(fpc1020_init);
+module_exit(fpc1020_exit);
+
+
+MODULE_DESCRIPTION("FPC1020 Fingerprint sensor device driver.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index aa8373d..66165d9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -314,6 +314,8 @@
host->max_req_size / 512));
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
blk_queue_max_segments(mq->queue, host->max_segs);
+ if (host->inlinecrypt_support)
+ queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
}
/**
@@ -483,6 +485,9 @@
success:
sema_init(&mq->thread_sem, 1);
+ if (host->inlinecrypt_support)
+ queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
+
/* hook for pm qos legacy init */
if (card->host->ops->init)
card->host->ops->init(card->host);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8700e72..60f8a6d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3393,7 +3393,22 @@
if (host->bus_ops && !host->bus_dead && host->card) {
mmc_power_up(host, host->card->ocr);
BUG_ON(!host->bus_ops->resume);
- host->bus_ops->resume(host);
+ err = host->bus_ops->resume(host);
+ if (err) {
+ pr_err("%s: %s: resume failed: %d\n",
+ mmc_hostname(host), __func__, err);
+ /*
+ * If we have cd-gpio based detection mechanism and
+ * deferred resume is supported, we will not detect
+ * card removal event when system is suspended. So if
+ * resume fails after a system suspend/resume,
+ * schedule the work to detect card presence.
+ */
+ if (mmc_card_is_removable(host) &&
+ !(host->caps & MMC_CAP_NEEDS_POLL)) {
+ mmc_detect_change(host, 0);
+ }
+ }
if (mmc_card_cmdq(host->card)) {
err = mmc_cmdq_halt(host, false);
if (err)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 965d1f0..245493e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1329,6 +1329,8 @@
mmc_hostname(host), __func__, err);
mmc_card_set_removed(host->card);
mmc_detect_change(host, msecs_to_jiffies(200));
+ } else if (err) {
+ goto out;
}
mmc_card_clr_suspended(host->card);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index d91eb67..7d24213 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -5075,6 +5075,7 @@
goto vreg_deinit;
}
host->is_crypto_en = true;
+ msm_host->mmc->inlinecrypt_support = true;
/* Packed commands cannot be encrypted/decrypted using ICE */
msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
MMC_CAP2_PACKED_WR_CONTROL);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f7c63cf..f70420f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -21,11 +21,16 @@
#include "ftm.h"
#define WIL_MAX_ROC_DURATION_MS 5000
+#define CTRY_CHINA "CN"
bool disable_ap_sme;
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+static bool country_specific_board_file;
+module_param(country_specific_board_file, bool, 0444);
+MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)");
+
static bool ignore_reg_hints = true;
module_param(ignore_reg_hints, bool, 0444);
MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
@@ -1984,6 +1989,64 @@
return 0;
}
+static int wil_switch_board_file(struct wil6210_priv *wil,
+ const u8 *new_regdomain)
+{
+ int rc = 0;
+
+ if (!country_specific_board_file)
+ return 0;
+
+ if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) {
+ wil_info(wil, "moving out of China reg domain, use default board file\n");
+ wil->board_file_country[0] = '\0';
+ } else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) {
+ wil_info(wil, "moving into China reg domain, use country specific board file\n");
+ strlcpy(wil->board_file_country, CTRY_CHINA,
+ sizeof(wil->board_file_country));
+ } else {
+ return 0;
+ }
+
+ /* need to switch board file - reset the device */
+
+ mutex_lock(&wil->mutex);
+
+ if (!netif_running(wil_to_ndev(wil)) || wil_is_recovery_blocked(wil))
+ /* new board file will be used in next FW load */
+ goto out;
+
+ __wil_down(wil);
+ rc = __wil_up(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+static void wil_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_info(wil, "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+ request->alpha2[0], request->alpha2[1],
+ request->intersect ? " intersect" : "",
+ request->processed ? " processed" : "",
+ request->initiator, request->user_reg_hint_type);
+
+ if (memcmp(wil->regdomain, request->alpha2, 2) == 0)
+ /* reg domain did not change */
+ return;
+
+ rc = wil_switch_board_file(wil, request->alpha2);
+ if (rc)
+ wil_err(wil, "switch board file failed %d\n", rc);
+
+ memcpy(wil->regdomain, request->alpha2, 2);
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2055,6 +2118,8 @@
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+ wiphy->reg_notifier = wil_cfg80211_reg_notify;
+
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
wiphy->vendor_events = wil_nl80211_vendor_events;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2baa6cf..f37254d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -26,6 +26,7 @@
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@@ -946,6 +947,30 @@
le32_to_cpus(&r->head);
}
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file = WIL_BOARD_FILE_NAME;
+ const char *ext;
+ int prefix_len;
+
+ if (wil->board_file_country[0] == '\0') {
+ strlcpy(buf, board_file, len);
+ return;
+ }
+
+ /* use country specific board file */
+ if (len < strlen(board_file) + 4 /* for _XX and terminating null */)
+ return;
+
+ ext = strrchr(board_file, '.');
+ prefix_len = (ext ? ext - board_file : strlen(board_file));
+ snprintf(buf, len, "%.*s_%.2s",
+ prefix_len, board_file, wil->board_file_country);
+ if (ext)
+ strlcat(buf, ext, len);
+}
+
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
@@ -1260,8 +1285,12 @@
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
- wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
@@ -1273,11 +1302,9 @@
if (rc)
goto out;
if (wil->brd_file_addr)
- rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ rc = wil_request_board(wil, board_file);
else
- rc = wil_request_firmware(wil,
- WIL_BOARD_FILE_NAME,
- true);
+ rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index f4476ee..2b71deb 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -683,6 +683,7 @@
const char *hw_name;
const char *wil_fw_name;
char *board_file;
+ char board_file_country[3]; /* alpha2 */
u32 brd_file_addr;
u32 brd_file_max_size;
DECLARE_BITMAP(hw_capa, hw_capa_last);
@@ -796,6 +797,8 @@
} snr_thresh;
int fw_calib_result;
+ /* current reg domain configured in kernel */
+ char regdomain[3]; /* alpha2 */
#ifdef CONFIG_PM
struct notifier_block pm_notify;
@@ -873,6 +876,8 @@
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 52a297d..0a963b1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -46,10 +46,20 @@
config OF_PROMTREE
bool
+config OF_KOBJ
+ bool "Display devicetree in sysfs"
+ def_bool SYSFS
+ help
+ Some embedded platforms have no need to display the devicetree
+ nodes and properties in sysfs. Disabling this option will save
+ a small amount of memory, as well as decrease boot time. By
+ default this option will be enabled if SYSFS is enabled.
+
# Hardly any platforms need this. It is safe to select, but only do so if you
# need it.
config OF_DYNAMIC
bool "Support for dynamic device trees" if OF_UNITTEST
+ select OF_KOBJ
help
On some platforms, the device tree can be manipulated at runtime.
While this option is selected automatically on such platforms, you
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index b2f474a..760b730 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,5 @@
obj-y = base.o device.o platform.o
+obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 23a6d36..0dc31cf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -98,108 +98,6 @@
}
#endif
-#ifndef CONFIG_OF_DYNAMIC
-static void of_node_release(struct kobject *kobj)
-{
- /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
-}
-#endif /* CONFIG_OF_DYNAMIC */
-
-struct kobj_type of_node_ktype = {
- .release = of_node_release,
-};
-
-static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t offset, size_t count)
-{
- struct property *pp = container_of(bin_attr, struct property, attr);
- return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
-}
-
-/* always return newly allocated name, caller must free after use */
-static const char *safe_name(struct kobject *kobj, const char *orig_name)
-{
- const char *name = orig_name;
- struct kernfs_node *kn;
- int i = 0;
-
- /* don't be a hero. After 16 tries give up */
- while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
- sysfs_put(kn);
- if (name != orig_name)
- kfree(name);
- name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
- }
-
- if (name == orig_name) {
- name = kstrdup(orig_name, GFP_KERNEL);
- } else {
- pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
- kobject_name(kobj), name);
- }
- return name;
-}
-
-int __of_add_property_sysfs(struct device_node *np, struct property *pp)
-{
- int rc;
-
- /* Important: Don't leak passwords */
- bool secure = strncmp(pp->name, "security-", 9) == 0;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset || !of_node_is_attached(np))
- return 0;
-
- sysfs_bin_attr_init(&pp->attr);
- pp->attr.attr.name = safe_name(&np->kobj, pp->name);
- pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
- pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
-
- rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
- WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
- return rc;
-}
-
-int __of_attach_node_sysfs(struct device_node *np)
-{
- const char *name;
- struct kobject *parent;
- struct property *pp;
- int rc;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset)
- return 0;
-
- np->kobj.kset = of_kset;
- if (!np->parent) {
- /* Nodes without parents are new top level trees */
- name = safe_name(&of_kset->kobj, "base");
- parent = NULL;
- } else {
- name = safe_name(&np->parent->kobj, kbasename(np->full_name));
- parent = &np->parent->kobj;
- }
- if (!name)
- return -ENOMEM;
- rc = kobject_add(&np->kobj, parent, "%s", name);
- kfree(name);
- if (rc)
- return rc;
-
- for_each_property_of_node(np, pp)
- __of_add_property_sysfs(np, pp);
-
- return 0;
-}
-
static struct device_node **phandle_cache;
static u32 phandle_cache_mask;
@@ -2021,22 +1919,6 @@
return 0;
}
-void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
-{
- sysfs_remove_bin_file(&np->kobj, &prop->attr);
- kfree(prop->attr.attr.name);
-}
-
-void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* at early boot, bail here and defer setup to of_init() */
- if (of_kset && of_node_is_attached(np))
- __of_sysfs_remove_bin_file(np, prop);
-}
-
/**
* of_remove_property - Remove a property from a node.
*
@@ -2096,21 +1978,6 @@
return 0;
}
-void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
- struct property *oldprop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* At early boot, bail out and defer setup to of_init() */
- if (!of_kset)
- return;
-
- if (oldprop)
- __of_sysfs_remove_bin_file(np, oldprop);
- __of_add_property_sysfs(np, newprop);
-}
-
/*
* of_update_property - Update a property in a node, if the property does
* not exist, add it.
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 888fdbc..765ba6e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
#include "of_private.h"
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+ return container_of(kobj, struct device_node, kobj);
+}
+
/**
* of_node_get() - Increment refcount of a node
* @node: Node to inc refcount, NULL is supported to simplify writing of
@@ -43,28 +48,6 @@
}
EXPORT_SYMBOL(of_node_put);
-void __of_detach_node_sysfs(struct device_node *np)
-{
- struct property *pp;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- BUG_ON(!of_node_is_initialized(np));
- if (!of_kset)
- return;
-
- /* only remove properties if on sysfs */
- if (of_node_is_attached(np)) {
- for_each_property_of_node(np, pp)
- __of_sysfs_remove_bin_file(np, pp);
- kobject_del(&np->kobj);
- }
-
- /* finally remove the kobj_init ref */
- of_node_put(np);
-}
-
static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
int of_reconfig_notifier_register(struct notifier_block *nb)
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
new file mode 100644
index 0000000..662f79e
--- /dev/null
+++ b/drivers/of/kobj.c
@@ -0,0 +1,165 @@
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "of_private.h"
+
+/* true when node is initialized */
+static int of_node_is_initialized(struct device_node *node)
+{
+ return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+int of_node_is_attached(struct device_node *node)
+{
+ return node && node->kobj.state_in_sysfs;
+}
+
+
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
+{
+ /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+ .release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct property *pp = container_of(bin_attr, struct property, attr);
+ return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+/* always return newly allocated name, caller must free after use */
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+ const char *name = orig_name;
+ struct kernfs_node *kn;
+ int i = 0;
+
+ /* don't be a hero. After 16 tries give up */
+ while (i < 16 && name && (kn = sysfs_get_dirent(kobj->sd, name))) {
+ sysfs_put(kn);
+ if (name != orig_name)
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ }
+
+ if (name == orig_name) {
+ name = kstrdup(orig_name, GFP_KERNEL);
+ } else {
+ pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
+ kobject_name(kobj), name);
+ }
+ return name;
+}
+
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ int rc;
+
+ /* Important: Don't leak passwords */
+ bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return 0;
+
+ if (!of_kset || !of_node_is_attached(np))
+ return 0;
+
+ sysfs_bin_attr_init(&pp->attr);
+ pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+ pp->attr.attr.mode = secure ? 0400 : 0444;
+ pp->attr.size = secure ? 0 : pp->length;
+ pp->attr.read = of_node_property_read;
+
+ rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+ WARN(rc, "error adding attribute %s to node %s\n", pp->name,
+ np->full_name);
+ return rc;
+}
+
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ kfree(prop->attr.attr.name);
+}
+
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+{
+ /* at early boot, bail here and defer setup to of_init() */
+ if (of_kset && of_node_is_attached(np))
+ __of_sysfs_remove_bin_file(np, prop);
+}
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop)
+{
+ /* At early boot, bail out and defer setup to of_init() */
+ if (!of_kset)
+ return;
+
+ if (oldprop)
+ __of_sysfs_remove_bin_file(np, oldprop);
+ __of_add_property_sysfs(np, newprop);
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+ const char *name;
+ struct kobject *parent;
+ struct property *pp;
+ int rc;
+
+ if (!of_kset)
+ return 0;
+
+ np->kobj.kset = of_kset;
+ if (!np->parent) {
+ /* Nodes without parents are new top level trees */
+ name = safe_name(&of_kset->kobj, "base");
+ parent = NULL;
+ } else {
+ name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+ parent = &np->parent->kobj;
+ }
+ if (!name)
+ return -ENOMEM;
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
+ if (rc)
+ return rc;
+
+ for_each_property_of_node(np, pp)
+ __of_add_property_sysfs(np, pp);
+
+ return 0;
+}
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+ struct property *pp;
+
+ BUG_ON(!of_node_is_initialized(np));
+ if (!of_kset)
+ return;
+
+ /* only remove properties if on sysfs */
+ if (of_node_is_attached(np)) {
+ for_each_property_of_node(np, pp)
+ __of_sysfs_remove_bin_file(np, pp);
+ kobject_del(&np->kobj);
+ }
+
+ /* finally remove the kobj_init ref */
+ of_node_put(np);
+}
+
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index c4d7fdc..eb811185 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,12 +35,6 @@
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
- return container_of(kobj, struct device_node, kobj);
-}
-
#if defined(CONFIG_OF_DYNAMIC)
extern int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop);
@@ -55,6 +49,29 @@
}
#endif /* CONFIG_OF_DYNAMIC */
+#if defined(CONFIG_OF_KOBJ)
+int of_node_is_attached(struct device_node *node);
+int __of_add_property_sysfs(struct device_node *np, struct property *pp);
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop);
+int __of_attach_node_sysfs(struct device_node *np);
+void __of_detach_node_sysfs(struct device_node *np);
+#else
+static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ return 0;
+}
+static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
+static inline void __of_update_property_sysfs(struct device_node *np,
+ struct property *newprop, struct property *oldprop) {}
+static inline int __of_attach_node_sysfs(struct device_node *np)
+{
+ return 0;
+}
+static inline void __of_detach_node_sysfs(struct device_node *np) {}
+#endif
+
/**
* General utilities for working with live trees.
*
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df02f98..37df1bf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1128,6 +1128,7 @@
armpmu_init(pmu);
pmu->plat_device = pdev;
+ platform_set_drvdata(pdev, pmu);
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
init_fn = of_id->data;
diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile
index b350a59..1eed995 100644
--- a/drivers/platform/msm/gsi/Makefile
+++ b/drivers/platform/msm/gsi/Makefile
@@ -1 +1,8 @@
obj-$(CONFIG_GSI) += gsi.o gsi_dbg.o
+
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+obj-$(CONFIG_GSI) += gsi_emulation.o
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index e729c56..1af3447 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_reg.h"
+#include "gsi_emulation.h"
#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_STOP_CMD_TIMEOUT_MS 20
@@ -33,6 +34,8 @@
{ },
};
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
struct gsi_ctx *gsi_ctx;
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
@@ -577,7 +580,7 @@
if (!type)
break;
- GSIDBG_LOW("type %x\n", type);
+ GSIDBG_LOW("type 0x%x\n", type);
if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
gsi_handle_ch_ctrl(ee);
@@ -777,17 +780,57 @@
GSIERR("bad irq specified %u\n", props->irq);
return -GSI_STATUS_INVALID_PARAMS;
}
-
- res = devm_request_irq(gsi_ctx->dev, props->irq,
+ /*
+ * On a real UE, there are two separate interrupt
+ * vectors that get directed toward the GSI/IPA
+ * drivers. They are handled by gsi_isr() and
+ * (ipa_isr() or ipa3_isr()) respectively. In the
+ * emulation environment, this is not the case;
+ * instead, interrupt vectors are routed to the
+ * emulation hardware's interrupt controller, which in
+ * turn, forwards a single interrupt to the GSI/IPA
+ * driver. When the new interrupt vector is received,
+ * the driver needs to probe the interrupt
+ * controller's registers so see if one, the other, or
+ * both interrupts have occurred. Given the above, we
+ * now need to handle both situations, namely: the
+ * emulator's and the real UE.
+ */
+ if (running_emulation) {
+ /*
+ * New scheme involving the emulator's
+ * interrupt controller.
+ */
+ res = devm_request_threaded_irq(
+ gsi_ctx->dev,
+ props->irq,
+ /* top half handler to follow */
+ emulator_hard_irq_isr,
+ /* threaded bottom half handler to follow */
+ emulator_soft_irq_isr,
+ IRQF_SHARED,
+ "emulator_intcntrlr",
+ gsi_ctx);
+ } else {
+ /*
+ * Traditional scheme used on the real UE.
+ */
+ res = devm_request_irq(gsi_ctx->dev, props->irq,
(irq_handler_t) gsi_isr,
props->req_clk_cb ? IRQF_TRIGGER_RISING :
IRQF_TRIGGER_HIGH,
"gsi",
gsi_ctx);
+ }
if (res) {
- GSIERR("failed to register isr for %u\n", props->irq);
+ GSIERR(
+ "failed to register isr for %u\n",
+ props->irq);
return -GSI_STATUS_ERROR;
}
+ GSIDBG(
+ "succeeded to register isr for %u\n",
+ props->irq);
res = enable_irq_wake(props->irq);
if (res)
@@ -808,6 +851,41 @@
return -GSI_STATUS_RES_ALLOC_FAILURE;
}
+ GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%lx)\n",
+ &(props->phys_addr),
+ gsi_ctx->base,
+ props->size);
+
+ if (running_emulation) {
+ GSIDBG("GSI SW ver register value 0x%x\n",
+ gsi_readl(gsi_ctx->base +
+ GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
+ gsi_ctx->intcntrlr_mem_size =
+ props->emulator_intcntrlr_size;
+ gsi_ctx->intcntrlr_base =
+ devm_ioremap_nocache(
+ gsi_ctx->dev,
+ props->emulator_intcntrlr_addr,
+ props->emulator_intcntrlr_size);
+ if (!gsi_ctx->intcntrlr_base) {
+ GSIERR(
+ "failed to remap emulator's interrupt controller HW\n");
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ return -GSI_STATUS_RES_ALLOC_FAILURE;
+ }
+
+ GSIDBG(
+ "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
+ &(props->emulator_intcntrlr_addr),
+ gsi_ctx->intcntrlr_base,
+ props->emulator_intcntrlr_size);
+
+ gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
+ gsi_ctx->intcntrlr_client_isr =
+ props->emulator_intcntrlr_client_isr;
+ }
+
gsi_ctx->per = *props;
gsi_ctx->per_registered = true;
mutex_init(&gsi_ctx->mlock);
@@ -816,6 +894,9 @@
gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
if (gsi_ctx->max_ch == 0) {
devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ if (running_emulation)
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+ gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
GSIERR("failed to get max channels\n");
return -GSI_STATUS_ERROR;
@@ -823,6 +904,9 @@
gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
if (gsi_ctx->max_ev == 0) {
devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ if (running_emulation)
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+ gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
GSIERR("failed to get max event rings\n");
return -GSI_STATUS_ERROR;
@@ -831,7 +915,9 @@
if (props->mhi_er_id_limits_valid &&
props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
- gsi_ctx->base = NULL;
+ if (running_emulation)
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+ gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
GSIERR("MHI event ring start id %u is beyond max %u\n",
props->mhi_er_id_limits[0], gsi_ctx->max_ev);
@@ -872,6 +958,22 @@
gsi_writel(0, gsi_ctx->base +
GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+ if (running_emulation) {
+ /*
+ * Set up the emulator's interrupt controller...
+ */
+ res = setup_emulator_cntrlr(
+ gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
+ if (res != 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+ gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("setup_emulator_cntrlr() failed\n");
+ return res;
+ }
+ }
+
*dev_hdl = (uintptr_t)gsi_ctx;
return GSI_STATUS_SUCCESS;
@@ -2134,6 +2236,10 @@
BUG();
}
+ /* Hardware issue fixed from GSI 2.0 and no need for the WA */
+ if (gsi_ctx->per.ver >= GSI_VER_2_0)
+ reset_done = true;
+
/* workaround: reset GSI producers again */
if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
@@ -2730,24 +2836,47 @@
{
void __iomem *gsi_base = (void __iomem *)base;
- gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
- gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
- gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
- gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
- gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
- gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
- gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
- gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
- gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
- gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+ gsi_writel(1,
+ gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
+ gsi_writel(2,
+ gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
+ gsi_writel(3,
+ gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
+ gsi_writel(4,
+ gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
+ gsi_writel(5,
+ gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
+ gsi_writel(6,
+ gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
+ gsi_writel(7,
+ gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
+ gsi_writel(8,
+ gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+ gsi_writel(9,
+ gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+ gsi_writel(10,
+ gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
+ gsi_writel(11,
+ gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
+ gsi_writel(12,
+ gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
+ gsi_writel(13,
+ gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+
+ if (running_emulation) {
+ gsi_writel(14,
+ gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
+ gsi_writel(15,
+ gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
+ gsi_writel(16,
+ gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);
+ }
}
static void gsi_configure_bck_prs_matrix(void *base)
{
void __iomem *gsi_base = (void __iomem *)base;
+
/*
* For now, these are default values. In the future, GSI FW image will
* produce optimized back-pressure values based on the FW image.
@@ -2970,15 +3099,45 @@
},
};
+static struct platform_device *pdev;
+
/**
* Module Init.
*/
static int __init gsi_init(void)
{
+ int ret;
+
pr_debug("gsi_init\n");
- return platform_driver_register(&msm_gsi_driver);
+
+ ret = platform_driver_register(&msm_gsi_driver);
+ if (ret < 0)
+ goto out;
+
+ if (running_emulation) {
+ pdev = platform_device_register_simple("gsi", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ platform_driver_unregister(&msm_gsi_driver);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
}
+/*
+ * Module exit.
+ */
+static void __exit gsi_exit(void)
+{
+ if (running_emulation && pdev)
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&msm_gsi_driver);
+}
+
+module_exit(gsi_exit);
arch_initcall(gsi_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 7e10405..92849d9 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,8 +18,16 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
+#include <linux/errno.h>
#include <linux/ipc_logging.h>
+/*
+ * The following for adding code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "gsi_emulation_stubs.h"
+#endif
+
#define GSI_CHAN_MAX 31
#define GSI_EVT_RING_MAX 23
#define GSI_NO_EVT_ERINDEX 31
@@ -204,6 +212,13 @@
struct completion gen_ee_cmd_compl;
void *ipc_logbuf;
void *ipc_logbuf_low;
+ /*
+ * The following used only on emulation systems.
+ */
+ void __iomem *intcntrlr_base;
+ u32 intcntrlr_mem_size;
+ irq_handler_t intcntrlr_gsi_isr;
+ irq_handler_t intcntrlr_client_isr;
};
enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_emulation.c b/drivers/platform/msm/gsi/gsi_emulation.c
new file mode 100644
index 0000000..adaaaaa
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "gsi_emulation.h"
+
+/*
+ * *****************************************************************************
+ * The following used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+ void __iomem *intcntrlr_base,
+ u32 intcntrlr_mem_size)
+{
+ uint32_t val, ver, intrCnt, rangeCnt, range;
+
+ val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT);
+
+ intrCnt = val & 0xFFFF;
+ ver = (val >> 16) & 0xFFFF;
+ rangeCnt = intrCnt / 32;
+
+ GSIDBG(
+ "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n",
+ val, intrCnt, ver, rangeCnt);
+
+ /*
+ * Verify the interrupt controller version
+ */
+ if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) {
+ GSIERR(
+ "Error: invalid interrupt controller version 0x%x\n",
+ ver);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
+ /*
+ * Verify the interrupt count
+ *
+ * NOTE: intrCnt must be at least one block and multiple of 32
+ */
+ if ((intrCnt % 32) != 0) {
+ GSIERR(
+ "Invalid interrupt count read from HW 0x%04x\n",
+ intrCnt);
+ return -GSI_STATUS_ERROR;
+ }
+
+ /*
+ * Calculate number of ranges used, each range handles 32 int lines
+ */
+ if (rangeCnt > DEO_IC_MAX_RANGE_CNT) {
+ GSIERR(
+ "SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n",
+ rangeCnt,
+ DEO_IC_MAX_RANGE_CNT);
+ return -GSI_STATUS_ERROR;
+ }
+
+ /*
+ * Let's take the last register offset minus the first
+ * register offset (ie. range) and compare it to the interrupt
+ * controller's dtsi defined memory size. The range better
+ * fit within the size.
+ */
+ val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT;
+ if (val > intcntrlr_mem_size) {
+ GSIERR(
+ "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n",
+ val, intcntrlr_mem_size);
+ return -GSI_STATUS_ERROR;
+ }
+
+ /*
+ * The following will disable the emulators interrupt controller,
+ * so that we can config it...
+ */
+ GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+ gsi_emu_writel(
+ 0x0,
+ intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+ /*
+ * Init register maps of all ranges
+ */
+ for (range = 0; range < rangeCnt; range++) {
+ /*
+ * Disable all int sources by setting all enable clear bits
+ */
+ GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range);
+ gsi_emu_writel(
+ 0xFFFFFFFF,
+ intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range));
+
+ /*
+ * Clear all raw statuses
+ */
+ GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range);
+ gsi_emu_writel(
+ 0xFFFFFFFF,
+ intcntrlr_base + GE_INT_CLEAR_n(range));
+
+ /*
+ * Init all int types
+ */
+ GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range);
+ gsi_emu_writel(
+ 0x0,
+ intcntrlr_base + GE_INT_TYPE_n(range));
+ }
+
+ /*
+ * The following tells the interrupt controller to interrupt us
+ * when it sees interrupts from ipa and/or gsi.
+ *
+ * Interrupts:
+ * ===================================================================
+ * DUT0 [ 63 : 16 ]
+ * ipa_irq [ 3 : 0 ] <---HERE
+ * ipa_gsi_bam_irq [ 7 : 4 ] <---HERE
+ * ipa_bam_apu_sec_error_irq [ 8 ]
+ * ipa_bam_apu_non_sec_error_irq [ 9 ]
+ * ipa_bam_xpu2_msa_intr [ 10 ]
+ * ipa_vmidmt_nsgcfgirpt [ 11 ]
+ * ipa_vmidmt_nsgirpt [ 12 ]
+ * ipa_vmidmt_gcfgirpt [ 13 ]
+ * ipa_vmidmt_girpt [ 14 ]
+ * bam_xpu3_qad_non_secure_intr_sp [ 15 ]
+ */
+ GSIDBG("Writing GE_INT_ENABLE_n(0)\n");
+ gsi_emu_writel(
+ 0x00FF, /* See <---HERE above */
+ intcntrlr_base + GE_INT_ENABLE_n(0));
+
+ /*
+ * The following will enable the IC post config...
+ */
+ GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+ gsi_emu_writel(
+ 0x1,
+ intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+ return 0;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+ int irq,
+ void *ctxt)
+{
+ struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+ uint32_t val;
+
+ val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS);
+
+ /*
+ * If bit zero is set, interrupt is for us, hence return IRQ_NONE
+ * when it's not set...
+ */
+ if (!(val & 0x00000001))
+ return IRQ_NONE;
+
+ /*
+ * The following will mask (ie. turn off) future interrupts from
+ * the emulator's interrupt controller. It will stay this way until
+ * we turn it back on...which will be done in the bottom half
+ * (ie. emulator_soft_irq_isr)...
+ */
+ gsi_emu_writel(
+ 0x0,
+ gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+ int irq,
+ void *ctxt)
+{
+ struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+ irqreturn_t retVal = IRQ_HANDLED;
+ uint32_t val;
+
+ val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0));
+
+ GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val);
+
+ if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) {
+ GSIDBG("Got gsi interrupt\n");
+ retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt);
+ }
+
+ if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) {
+ GSIDBG("Got ipa interrupt\n");
+ retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0);
+ }
+
+ /*
+ * The following will clear the interrupts...
+ */
+ gsi_emu_writel(
+ 0xFFFFFFFF,
+ gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0));
+
+ /*
+ * The following will unmask (ie. turn on) future interrupts from
+ * the emulator's interrupt controller...
+ */
+ gsi_emu_writel(
+ 0x1,
+ gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+ return retVal;
+}
diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h
new file mode 100644
index 0000000..246f9a8
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_GSI_EMULATION_H_)
+# define _GSI_EMULATION_H_
+
+# include <linux/interrupt.h>
+
+# include "gsi.h"
+# include "gsi_reg.h"
+# include "gsi_emulation_stubs.h"
+
+# define gsi_emu_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
+
+# define CNTRLR_BASE 0
+
+/*
+ * The following file contains definitions and declarations that are
+ * germane only to the IPA emulation system, which is run from an X86
+ * environment. Declarations for non-X86 (ie. arm) are merely stubs
+ * to facilitate compile and link.
+ *
+ * Interrupt controller registers.
+ * Descriptions taken from the EMULATION interrupt controller SWI.
+ * - There is only one Master Enable register
+ * - Each group of 32 interrupt lines (range) is controlled by 8 registers,
+ * which are consecutive in memory:
+ * GE_INT_ENABLE_n
+ * GE_INT_ENABLE_CLEAR_n
+ * GE_INT_ENABLE_SET_n
+ * GE_INT_TYPE_n
+ * GE_IRQ_STATUS_n
+ * GE_RAW_STATUS_n
+ * GE_INT_CLEAR_n
+ * GE_SOFT_INT_n
+ * - After the above 8 registers, there are the registers of the next
+ * group (range) of 32 interrupt lines, and so on.
+ */
+
+/** @brief The interrupt controller version and interrupt count register.
+ * Specifies interrupt controller version (upper 16 bits) and the
+ * number of interrupt lines supported by HW (lower 16 bits).
+ */
+# define GE_INT_CTL_VER_CNT \
+ (CNTRLR_BASE + 0x0000)
+
+/** @brief Enable or disable physical IRQ output signal to the system,
+ * not affecting any status registers.
+ *
+ * 0x0 : DISABLE IRQ output disabled
+ * 0x1 : ENABLE IRQ output enabled
+ */
+# define GE_INT_OUT_ENABLE \
+ (CNTRLR_BASE + 0x0004)
+
+/** @brief The IRQ master enable register.
+ * Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable.
+ */
+# define GE_INT_MASTER_ENABLE \
+ (CNTRLR_BASE + 0x0008)
+
+# define GE_INT_MASTER_STATUS \
+ (CNTRLR_BASE + 0x000C)
+
+/** @brief Each bit disables (bit=0, default) or enables (bit=1) the
+ * corresponding interrupt source
+ */
+# define GE_INT_ENABLE_n(n) \
+ (CNTRLR_BASE + 0x0010 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE.
+ * Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_CLEAR_n(n) \
+ (CNTRLR_BASE + 0x0014 + 0x20 * (n))
+
+/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE.
+ * Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_SET_n(n) \
+ (CNTRLR_BASE + 0x0018 + 0x20 * (n))
+
+/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input
+ * detection logic for each corresponding interrupt source
+ */
+# define GE_INT_TYPE_n(n) \
+ (CNTRLR_BASE + 0x001C + 0x20 * (n))
+
+/** @brief Shows the interrupt sources captured in RAW_STATUS that have been
+ * steered to irq_n by INT_SELECT. Interrupts must also be enabled by
+ * INT_ENABLE and MASTER_ENABLE. Read only register.
+ * Bit values: 1=active, 0=inactive
+ */
+# define GE_IRQ_STATUS_n(n) \
+ (CNTRLR_BASE + 0x0020 + 0x20 * (n))
+
+/** @brief Shows the interrupt sources that have been latched by the input
+ * logic of the Interrupt Controller. Read only register.
+ * Bit values: 1=active, 0=inactive
+ */
+# define GE_RAW_STATUS_n(n) \
+ (CNTRLR_BASE + 0x0024 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS.
+ * Does nothing for bit=0
+ */
+# define GE_INT_CLEAR_n(n) \
+ (CNTRLR_BASE + 0x0028 + 0x20 * (n))
+
+/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS.
+ * Does nothing for bit=0.
+ * @note Only functional for edge detected interrupts
+ */
+# define GE_SOFT_INT_n(n) \
+ (CNTRLR_BASE + 0x002C + 0x20 * (n))
+
+/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt
+ * lines. If HW is extended considerably, increase this value
+ */
+# define DEO_IC_MAX_RANGE_CNT 8
+
+/** @brief Size of the registers of one range in memory, in bytes */
+# define DEO_IC_RANGE_MEM_SIZE 32 /* SWI: 8 registers, no gaps */
+
+/** @brief Minimal Interrupt controller HW version */
+# define DEO_IC_INT_CTL_VER_MIN 0x0102
+
+
+#if IPA_EMULATION_COMPILE == 1 /* declarations to follow */
+
+/*
+ * *****************************************************************************
+ * The following used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+ void __iomem *intcntrlr_base,
+ u32 intcntrlr_mem_size);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+ int irq,
+ void *ctxt);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+ int irq,
+ void *ctxt);
+
+# else /* #if IPA_EMULATION_COMPILE != 1, then definitions to follow */
+
+static inline int setup_emulator_cntrlr(
+ void __iomem *intcntrlr_base,
+ u32 intcntrlr_mem_size)
+{
+ return 0;
+}
+
+static inline irqreturn_t emulator_hard_irq_isr(
+ int irq,
+ void *ctxt)
+{
+ return IRQ_NONE;
+}
+
+static inline irqreturn_t emulator_soft_irq_isr(
+ int irq,
+ void *ctxt)
+{
+ return IRQ_HANDLED;
+}
+
+# endif /* #if IPA_EMULATION_COMPILE == 1 */
+
+#endif /* #if !defined(_GSI_EMULATION_H_) */
diff --git a/drivers/platform/msm/gsi/gsi_emulation_stubs.h b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
new file mode 100644
index 0000000..dd9d0df
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_GSI_EMULATION_STUBS_H_)
+# define _GSI_EMULATION_STUBS_H_
+
+# include <asm/barrier.h>
+# define __iormb() rmb() /* used in gsi.h */
+# define __iowmb() wmb() /* used in gsi.h */
+
+#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index 15ed471..857e17b 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,3 +1,9 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
obj-$(CONFIG_IPA_UT) += test/
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index fdcf44d..3a75bdd 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -19,8 +19,16 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/ipa_uc_offload.h>
+#include <linux/pci.h>
#include "ipa_api.h"
+/*
+ * The following for adding code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_v3/ipa_emulation_stubs.h"
+#endif
+
#define DRV_NAME "ipa"
#define IPA_API_DISPATCH_RETURN(api, p...) \
@@ -96,6 +104,8 @@
} \
} while (0)
+static bool running_emulation = IPA_EMULATION_COMPILE;
+
static enum ipa_hw_type ipa_api_hw_type;
static struct ipa_api_controller *ipa_api_ctrl;
@@ -2916,6 +2926,57 @@
{}
};
+/*********************************************************/
+/* PCIe Version */
+/*********************************************************/
+
+static const struct of_device_id ipa_pci_drv_match[] = {
+ { .compatible = "qcom,ipa", },
+ {}
+};
+
+/*
+ * Forward declarations of static functions required for PCI
+ * registration
+ *
+ * VENDOR and DEVICE should be defined in pci_ids.h
+ */
+static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ipa_pci_remove(struct pci_dev *pdev);
+static void ipa_pci_shutdown(struct pci_dev *pdev);
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev,
+ pci_channel_state_t state);
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev);
+static void ipa_pci_io_resume(struct pci_dev *dev);
+
+#define LOCAL_VENDOR 0x17CB
+#define LOCAL_DEVICE 0x00ff
+
+static const char ipa_pci_driver_name[] = "qcipav3";
+
+static const struct pci_device_id ipa_pci_tbl[] = {
+ { PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) },
+ { 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ipa_pci_tbl);
+
+/* PCI Error Recovery */
+static const struct pci_error_handlers ipa_pci_err_handler = {
+ .error_detected = ipa_pci_io_error_detected,
+ .slot_reset = ipa_pci_io_slot_reset,
+ .resume = ipa_pci_io_resume,
+};
+
+static struct pci_driver ipa_pci_driver = {
+ .name = ipa_pci_driver_name,
+ .id_table = ipa_pci_tbl,
+ .probe = ipa_pci_probe,
+ .remove = ipa_pci_remove,
+ .shutdown = ipa_pci_shutdown,
+ .err_handler = &ipa_pci_err_handler
+};
+
static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
{
int result;
@@ -3364,10 +3425,86 @@
},
};
+/*********************************************************/
+/* PCIe Version */
+/*********************************************************/
+
+static int ipa_pci_probe(
+ struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
+{
+ int result;
+
+ if (!pci_dev || !ent) {
+ pr_err(
+ "Bad arg: pci_dev (%pK) and/or ent (%pK)\n",
+ pci_dev, ent);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ipa_api_ctrl) {
+ ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+ if (ipa_api_ctrl == NULL)
+ return -ENOMEM;
+ /* Get IPA HW Version */
+ result = of_property_read_u32(NULL,
+ "qcom,ipa-hw-ver", &ipa_api_hw_type);
+ if (result || ipa_api_hw_type == 0) {
+ pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+ kfree(ipa_api_ctrl);
+ ipa_api_ctrl = NULL;
+ return -ENODEV;
+ }
+ pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
+ }
+
+ /*
+ * Call a reduced version of platform_probe appropriate for PCIe
+ */
+ result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match);
+
+ if (result && result != -EPROBE_DEFER)
+ pr_err("ipa: ipa3_pci_drv_probe failed\n");
+
+ if (running_emulation)
+ ipa_ut_module_init();
+
+ return result;
+}
+
+static void ipa_pci_remove(struct pci_dev *pci_dev)
+{
+ if (running_emulation)
+ ipa_ut_module_exit();
+}
+
+static void ipa_pci_shutdown(struct pci_dev *pci_dev)
+{
+}
+
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev,
+ pci_channel_state_t state)
+{
+ return 0;
+}
+
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev)
+{
+ return 0;
+}
+
+static void ipa_pci_io_resume(struct pci_dev *pci_dev)
+{
+}
+
static int __init ipa_module_init(void)
{
pr_debug("IPA module init\n");
+ if (running_emulation) {
+ /* Register as a PCI device driver */
+ return pci_register_driver(&ipa_pci_driver);
+ }
/* Register as a platform device driver */
return platform_driver_register(&ipa_plat_drv);
}
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index b7edb6f..cbcb0ee 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -452,6 +452,10 @@
int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct ipa_api_controller *api_ctrl,
const struct of_device_id *pdrv_match);
+int ipa3_pci_drv_probe(
+ struct pci_dev *pci_dev,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match);
#else
static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct ipa_api_controller *api_ctrl,
@@ -459,6 +463,13 @@
{
return -ENODEV;
}
+static inline int ipa3_pci_drv_probe(
+ struct pci_dev *pci_dev,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ return -ENODEV;
+}
#endif /* (CONFIG_IPA3) */
#endif /* _IPA_API_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index d9e3ab9..c7df5cf 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2153,6 +2153,18 @@
static void ipa_usb_debugfs_remove(void){}
#endif /* CONFIG_DEBUG_FS */
+static int ipa_usb_set_lock_unlock(bool is_lock)
+{
+ IPA_USB_DBG("entry\n");
+ if (is_lock)
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+ else
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ IPA_USB_DBG("exit\n");
+
+ return 0;
+}
+
int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
@@ -2216,6 +2228,16 @@
goto connect_fail;
}
+ /*
+ * Register for xdci lock/unlock callback with ipa core driver.
+ * As per use case, only register for IPA_CONS end point for now.
+ * If needed we can include the same for IPA_PROD ep.
+ * For IPA_USB_DIAG/DPL config there will not be any UL ep.
+ */
+ if (connect_params->teth_prot != IPA_USB_DIAG)
+ ipa3_register_lock_unlock_callback(&ipa_usb_set_lock_unlock,
+ ul_out_params->clnt_hdl);
+
IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
@@ -2293,6 +2315,15 @@
}
}
+ /*
+ * Deregister for xdci lock/unlock callback from ipa core driver.
+ * As per use case, only deregister for IPA_CONS end point for now.
+ * If needed we can include the same for IPA_PROD ep.
+ * For IPA_USB_DIAG/DPL config there will not be any UL config.
+ */
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+ ipa3_deregister_lock_unlock_callback(ul_clnt_hdl);
+
/* Change state to STOPPED */
if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
IPA_USB_ERR("failed to change state to stopped\n");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index b8a517e..530aa54 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -15,6 +15,7 @@
#ifndef _IPA_COMMON_I_H_
#define _IPA_COMMON_I_H_
+#include <linux/errno.h>
#include <linux/ipc_logging.h>
#include <linux/ipa.h>
#include <linux/ipa_uc_offload.h>
@@ -441,4 +442,7 @@
struct sg_table *in_sgt_ptr);
int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
+int ipa_ut_module_init(void);
+void ipa_ut_module_exit(void);
+
#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 84dce6f..50fe2a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1495,8 +1495,16 @@
}
}
}
- mutex_unlock(&ipa_ctx->lock);
+ /* commit the change to IPA-HW */
+ if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) ||
+ ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) {
+ IPAERR_RL("fail to commit flt-rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EPERM;
+ }
+ mutex_unlock(&ipa_ctx->lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 6285130..3241257 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -1426,6 +1426,15 @@
}
mutex_unlock(&ipa_ctx->lock);
+ /* commit the change to IPA-HW */
+ if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+ IPAERR_RL("fail to commit hdr\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&ipa_ctx->lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index c043bad..15be5d6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -327,6 +327,11 @@
size_t tmp;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (init->table_entries == 0) {
IPADBG("Table entries is zero\n");
@@ -576,6 +581,11 @@
int ret = 0;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (dma->entries <= 0) {
IPAERR_RL("Invalid number of commands %d\n",
@@ -762,6 +772,16 @@
int result;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
+ if (ipa_ctx->nat_mem.public_ip_addr) {
+ IPAERR_RL("Public IP addr not assigned and trying to delete\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (ipa_ctx->nat_mem.is_tmp_mem) {
IPAERR("using temp memory during nat del\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 7710279..073409b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1445,6 +1445,15 @@
}
}
}
+
+ /* commit the change to IPA-HW */
+ if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) ||
+ ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) {
+ IPAERR("fail to commit rt-rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa_ctx->lock);
+ return -EPERM;
+ }
mutex_unlock(&ipa_ctx->lock);
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index d76c208..681b009 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -72,6 +72,9 @@
#define INVALID_EP_MAPPING_INDEX (-1)
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+ (ARRAY_SIZE(__eq_array) <= (__eq_index))
+
struct ipa_ep_confing {
bool valid;
int pipe_num;
@@ -119,6 +122,7 @@
[IPA_2_0][IPA_CLIENT_MHI_PROD] = {true, 18},
[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = {true, 6},
[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = {true, 7},
+
[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
= {true, 12},
[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
@@ -1409,6 +1413,11 @@
}
if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
IPAERR("ran out of ihl_meq32 eq\n");
return -EPERM;
@@ -1426,6 +1435,11 @@
}
if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
IPAERR("ran out of ihl_meq32 eq\n");
return -EPERM;
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index ae4dccf..ed78342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -1,9 +1,19 @@
obj-$(CONFIG_IPA3) += ipahal/
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o
+ifdef CONFIG_X86
+ipat-y += ipa_dtsi_replacement.o
+endif
+
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index c523b3d..d64f89b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -35,6 +35,7 @@
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
+#include <linux/pci.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/scm.h>
@@ -57,169 +58,14 @@
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"
-#define IPA_GPIO_IN_QUERY_CLK_IDX 0
-#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
-#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
-
-#define IPA_SUMMING_THRESHOLD (0x10)
-#define IPA_PIPE_MEM_START_OFST (0x0)
-#define IPA_PIPE_MEM_SIZE (0x0)
-#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
- x == IPA_MODE_MOBILE_AP_WAN || \
- x == IPA_MODE_MOBILE_AP_WLAN)
-#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
-#define IPA_A5_MUX_HEADER_LENGTH (8)
-
-#define IPA_AGGR_MAX_STR_LENGTH (10)
-
-#define CLEANUP_TAG_PROCESS_TIMEOUT 500
-
-#define IPA_AGGR_STR_IN_BYTES(str) \
- (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
-
-#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
-
-#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
-
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
-#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
-
-#define IPA_MHI_GSI_EVENT_RING_ID_START 10
-#define IPA_MHI_GSI_EVENT_RING_ID_END 12
-
-#define IPA_SMEM_SIZE (8 * 1024)
-
-#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
-#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
-#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
-
-/* round addresses for closes page per SMMU requirements */
-#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
- do { \
- (iova_p) = rounddown((iova), PAGE_SIZE); \
- (pa_p) = rounddown((pa), PAGE_SIZE); \
- (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
- } while (0)
-
-
-/* The relative location in /lib/firmware where the FWs will reside */
-#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+/*
+ * The following for adding code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
#ifdef CONFIG_COMPAT
-#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ADD_HDR, \
- compat_uptr_t)
-#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_HDR, \
- compat_uptr_t)
-#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ADD_RT_RULE, \
- compat_uptr_t)
-#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_RT_RULE, \
- compat_uptr_t)
-#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ADD_FLT_RULE, \
- compat_uptr_t)
-#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_FLT_RULE, \
- compat_uptr_t)
-#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_GET_RT_TBL, \
- compat_uptr_t)
-#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_COPY_HDR, \
- compat_uptr_t)
-#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_QUERY_INTF, \
- compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_QUERY_INTF_TX_PROPS, \
- compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_QUERY_INTF_RX_PROPS, \
- compat_uptr_t)
-#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
- compat_uptr_t)
-#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_GET_HDR, \
- compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ALLOC_NAT_MEM, \
- compat_uptr_t)
-#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ALLOC_NAT_TABLE, \
- compat_uptr_t)
-#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
- compat_uptr_t)
-#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_V4_INIT_NAT, \
- compat_uptr_t)
-#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_INIT_IPV6CT_TABLE, \
- compat_uptr_t)
-#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_TABLE_DMA_CMD, \
- compat_uptr_t)
-#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_V4_DEL_NAT, \
- compat_uptr_t)
-#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_NAT_TABLE, \
- compat_uptr_t)
-#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_IPV6CT_TABLE, \
- compat_uptr_t)
-#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_NAT_MODIFY_PDN, \
- compat_uptr_t)
-#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_GET_NAT_OFFSET, \
- compat_uptr_t)
-#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_PULL_MSG, \
- compat_uptr_t)
-#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_RM_ADD_DEPENDENCY, \
- compat_uptr_t)
-#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_RM_DEL_DEPENDENCY, \
- compat_uptr_t)
-#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_GENERATE_FLT_EQ, \
- compat_uptr_t)
-#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_QUERY_RT_TBL_INDEX, \
- compat_uptr_t)
-#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_WRITE_QMAPID, \
- compat_uptr_t)
-#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_MDFY_FLT_RULE, \
- compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
- compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
- compat_uptr_t)
-#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
- compat_uptr_t)
-#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_ADD_HDR_PROC_CTX, \
- compat_uptr_t)
-#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_DEL_HDR_PROC_CTX, \
- compat_uptr_t)
-#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_MDFY_RT_RULE, \
- compat_uptr_t)
-
/**
* struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
* properties
@@ -243,8 +89,7 @@
compat_size_t size;
compat_off_t offset;
};
-
-#endif
+#endif /* #ifdef CONFIG_COMPAT */
#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
#define TZ_MEM_PROTECT_REGION_ID 0x10
@@ -284,6 +129,7 @@
static struct clk *ipa3_clk;
struct ipa3_context *ipa3_ctx;
+
static struct {
bool present[IPA_SMMU_CB_MAX];
bool arm_smmu;
@@ -2661,6 +2507,8 @@
*/
ipa3_q6_pipe_delay(false);
+ ipa3_set_usb_prod_pipe_delay();
+
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG_LOW("Exit with success\n");
}
@@ -2735,6 +2583,16 @@
u32 *ipa_sram_mmio;
unsigned long phys_addr;
+ IPADBG(
+ "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
+ ipa3_ctx->ipa_wrapper_base,
+ ipa3_ctx->ctrl->ipa_reg_base_ofst,
+ ipahal_get_reg_n_ofst(
+ IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4),
+ ipa3_ctx->smem_restricted_bytes,
+ ipa3_ctx->smem_sz);
+
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
@@ -3207,21 +3065,27 @@
}
IPADBG("Apps to IPA cmd pipe is connected\n");
+ IPADBG("Will initialize SRAM\n");
ipa3_ctx->ctrl->ipa_init_sram();
IPADBG("SRAM initialized\n");
+ IPADBG("Will initialize HDR\n");
ipa3_ctx->ctrl->ipa_init_hdr();
IPADBG("HDR initialized\n");
+ IPADBG("Will initialize V4 RT\n");
ipa3_ctx->ctrl->ipa_init_rt4();
IPADBG("V4 RT initialized\n");
+ IPADBG("Will initialize V6 RT\n");
ipa3_ctx->ctrl->ipa_init_rt6();
IPADBG("V6 RT initialized\n");
+ IPADBG("Will initialize V4 FLT\n");
ipa3_ctx->ctrl->ipa_init_flt4();
IPADBG("V4 FLT initialized\n");
+ IPADBG("Will initialize V6 FLT\n");
ipa3_ctx->ctrl->ipa_init_flt6();
IPADBG("V6 FLT initialized\n");
@@ -4412,8 +4276,8 @@
int result;
result = gsi_configure_regs(ipa3_res.transport_mem_base,
- ipa3_res.transport_mem_size,
- ipa3_res.ipa_mem_base);
+ ipa3_res.transport_mem_size,
+ ipa3_res.ipa_mem_base);
if (result) {
IPAERR("Failed to configure GSI registers\n");
return -EINVAL;
@@ -4506,12 +4370,19 @@
if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
ipa3_proxy_clk_vote();
- /* SMMU was already attached if used, safe to do allocations */
- if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
- ipa3_ctx->pdev)) {
- IPAERR("fail to init ipahal\n");
- result = -EFAULT;
- goto fail_ipahal;
+ /*
+ * SMMU was already attached if used, safe to do allocations
+ *
+ * NOTE WELL: On an emulation system, this allocation is done
+ * in ipa3_pre_init()
+ */
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+ if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+ ipa3_ctx->pdev)) {
+ IPAERR("fail to init ipahal\n");
+ result = -EFAULT;
+ goto fail_ipahal;
+ }
}
result = ipa3_init_hw();
@@ -4646,9 +4517,18 @@
gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
gsi_props.ee = resource_p->ee;
gsi_props.intr = GSI_INTR_IRQ;
- gsi_props.irq = resource_p->transport_irq;
gsi_props.phys_addr = resource_p->transport_mem_base;
gsi_props.size = resource_p->transport_mem_size;
+ if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+ gsi_props.irq = resource_p->emulator_irq;
+ gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
+ gsi_props.emulator_intcntrlr_addr =
+ resource_p->emulator_intcntrlr_mem_base;
+ gsi_props.emulator_intcntrlr_size =
+ resource_p->emulator_intcntrlr_mem_size;
+ } else {
+ gsi_props.irq = resource_p->transport_irq;
+ }
gsi_props.notify_cb = ipa_gsi_notify_cb;
gsi_props.req_clk_cb = NULL;
gsi_props.rel_clk_cb = NULL;
@@ -4716,12 +4596,12 @@
ipa3_register_panic_hdlr();
+ ipa3_debugfs_init();
+
mutex_lock(&ipa3_ctx->lock);
ipa3_ctx->ipa_initialization_complete = true;
mutex_unlock(&ipa3_ctx->lock);
- ipa3_debugfs_init();
-
ipa3_trigger_ipa_ready_cbs();
complete_all(&ipa3_ctx->init_completion_obj);
pr_info("IPA driver initialization was successful.\n");
@@ -4734,15 +4614,16 @@
gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
ipa3_destroy_flt_tbl_idrs();
- ipa3_proxy_clk_unvote();
fail_allok_pkt_init:
ipa3_nat_ipv6ct_destroy_devices();
fail_nat_ipv6ct_init_dev:
ipa3_free_dma_task_for_gsi();
fail_dma_task:
fail_init_hw:
- ipahal_destroy();
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION)
+ ipahal_destroy();
fail_ipahal:
+ ipa3_proxy_clk_unvote();
return result;
}
@@ -4751,10 +4632,24 @@
{
int result;
const struct firmware *fw;
+ const char *path = IPA_FWS_PATH;
- IPADBG("Manual FW loading process initiated\n");
+ if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+ switch (ipa3_get_emulation_type()) {
+ case IPA_HW_v3_5_1:
+ path = IPA_FWS_PATH_3_5_1;
+ break;
+ case IPA_HW_v4_0:
+ path = IPA_FWS_PATH_4_0;
+ break;
+ default:
+ break;
+ }
+ }
- result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->cdev.dev);
+ IPADBG("Manual FW loading (%s) process initiated\n", path);
+
+ result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
if (result < 0) {
IPAERR("request_firmware failed, error %d\n", result);
return result;
@@ -4766,7 +4661,13 @@
IPADBG("FWs are available for loading\n");
- result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+ if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+ result = emulator_load_fws(fw,
+ ipa3_res.transport_mem_base,
+ ipa3_res.transport_mem_size);
+ } else {
+ result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
+ }
if (result) {
IPAERR("Manual IPA FWs loading has failed\n");
release_firmware(fw);
@@ -4785,6 +4686,7 @@
release_firmware(fw);
IPADBG("Manual FW loading process is complete\n");
+
return 0;
}
@@ -4818,7 +4720,8 @@
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+ (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
result = ipa3_pil_load_ipa_fws();
else
result = ipa3_manual_load_ipa_fws();
@@ -5155,7 +5058,8 @@
goto fail_init_mem_partition;
}
- if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
+ ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
ipa3_ctx->ctrl->msm_bus_data_ptr =
msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
@@ -5205,6 +5109,28 @@
goto fail_remap;
}
+ IPADBG(
+ "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
+ resource_p->ipa_mem_base,
+ ipa3_ctx->ctrl->ipa_reg_base_ofst,
+ resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
+ ipa3_ctx->mmio,
+ resource_p->ipa_mem_size);
+
+ /*
+ * Emulation requires ipahal be initialized early...for FW
+ * download, hence...
+ */
+ if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+ if (ipahal_init(ipa3_ctx->ipa_hw_type,
+ ipa3_ctx->mmio,
+ &(ipa3_ctx->master_pdev->dev))) {
+ IPAERR("fail to init ipahal\n");
+ result = -EFAULT;
+ goto fail_ipahal_init;
+ }
+ }
+
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
@@ -5413,10 +5339,13 @@
* We can't register the GSI driver yet, as it expects
* the GSI FW to be up and running before the registration.
*
- * For IPA3.0, the GSI configuration is done by the GSI driver.
+ * For IPA3.0 and the emulation system, the GSI configuration
+ * is done by the GSI driver.
+ *
* For IPA3.1 (and on), the GSI configuration is done by TZ.
*/
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
+ ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
result = ipa3_gsi_pre_fw_load_init();
if (result) {
IPAERR("gsi pre FW loading config failed\n");
@@ -5494,6 +5423,9 @@
fail_create_transport_wq:
destroy_workqueue(ipa3_ctx->power_mgmt_wq);
fail_init_hw:
+ if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+ ipahal_destroy();
+fail_ipahal_init:
iounmap(ipa3_ctx->mmio);
fail_remap:
ipa3_disable_clks();
@@ -5806,6 +5738,7 @@
&ipa_drv_res->ee);
if (result)
ipa_drv_res->ee = 0;
+ IPADBG(":ee = %u\n", ipa_drv_res->ee);
ipa_drv_res->apply_rg10_wa =
of_property_read_bool(pdev->dev.of_node,
@@ -5818,7 +5751,7 @@
of_property_read_bool(pdev->dev.of_node,
"qcom,do-not-use-ch-gsi-20");
IPADBG(": GSI CH 20 WA is = %s\n",
- ipa_drv_res->apply_rg10_wa
+ ipa_drv_res->gsi_ch20_wa
? "Needed" : "Not needed");
elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
@@ -5897,6 +5830,26 @@
return result;
}
+ /*
+ * If we're on emulator, get its interrupt controller's mem
+ * start and size
+ */
+ if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+ resource = platform_get_resource_byname(
+ pdev, IORESOURCE_MEM, "intctrl-base");
+ if (!resource) {
+ IPAERR(":Can't find intctrl-base resource\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->emulator_intcntrlr_mem_base =
+ resource->start;
+ ipa_drv_res->emulator_intcntrlr_mem_size =
+ resource_size(resource);
+ IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
+ ipa_drv_res->emulator_intcntrlr_mem_base,
+ ipa_drv_res->emulator_intcntrlr_mem_size);
+ }
+
return 0;
}
@@ -6691,5 +6644,216 @@
return 0;
}
+/**************************************************************
+ * PCIe Version
+ *************************************************************/
+
+int ipa3_pci_drv_probe(
+ struct pci_dev *pci_dev,
+ struct ipa_api_controller *api_ctrl,
+ const struct of_device_id *pdrv_match)
+{
+ int result;
+ struct ipa3_plat_drv_res *ipa_drv_res;
+ u32 bar0_offset;
+ u32 mem_start;
+ u32 mem_end;
+ uint32_t bits;
+ uint32_t ipa_start, gsi_start, intctrl_start;
+ struct device *dev;
+ static struct platform_device platform_dev;
+
+ if (!pci_dev || !api_ctrl || !pdrv_match) {
+ IPAERR(
+ "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
+ pci_dev, api_ctrl, pdrv_match);
+ return -EOPNOTSUPP;
+ }
+
+ dev = &(pci_dev->dev);
+
+ IPADBG("IPA PCI driver probing started\n");
+
+ /*
+ * Follow PCI driver flow here.
+ * pci_enable_device: Enables device and assigns resources
+ * pci_request_region: Makes BAR0 address region usable
+ */
+ result = pci_enable_device(pci_dev);
+ if (result < 0) {
+ IPAERR("pci_enable_device() failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ result = pci_request_region(pci_dev, 0, "IPA Memory");
+ if (result < 0) {
+ IPAERR("pci_request_region() failed\n");
+ pci_disable_device(pci_dev);
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * When in the PCI/emulation environment, &platform_dev is
+ * passed to get_ipa_dts_configuration(), but is unused, since
+ * all usages of it in the function are replaced by CPP
+ * relative to definitions in ipa_emulation_stubs.h. Passing
+ * &platform_dev makes code validity tools happy.
+ */
+ if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
+ IPAERR("get_ipa_dts_configuration() failed\n");
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return -EOPNOTSUPP;
+ }
+
+ ipa_drv_res = &ipa3_res;
+
+ result =
+ of_property_read_u32(NULL, "emulator-bar0-offset",
+ &bar0_offset);
+ if (result) {
+ IPAERR(":get resource failed for emulator-bar0-offset!\n");
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return -ENODEV;
+ }
+ IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
+
+ ipa_start = ipa_drv_res->ipa_mem_base;
+ gsi_start = ipa_drv_res->transport_mem_base;
+ intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
+
+ /*
+ * Which IRQ line will we be interrupted on?
+ */
+ ipa_drv_res->emulator_irq = pci_dev->irq;
+ IPADBG(
+ "EMULATION PCI_INTERRUPT_PIN(%u)\n",
+ ipa_drv_res->emulator_irq);
+
+ /*
+ * Set the ipa_mem_base to the PCI base address of BAR0
+ */
+ mem_start = pci_resource_start(pci_dev, 0);
+ mem_end = pci_resource_end(pci_dev, 0);
+
+ IPADBG("PCI START = 0x%x\n", mem_start);
+ IPADBG("PCI END = 0x%x\n", mem_end);
+
+ ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
+
+ smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+ smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+ ipa_drv_res->transport_mem_base =
+ ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
+
+ ipa_drv_res->emulator_intcntrlr_mem_base =
+ ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
+
+ IPADBG("ipa_mem_base = 0x%x\n",
+ ipa_drv_res->ipa_mem_base);
+ IPADBG("ipa_mem_size = 0x%x\n",
+ ipa_drv_res->ipa_mem_size);
+
+ IPADBG("transport_mem_base = 0x%x\n",
+ ipa_drv_res->transport_mem_base);
+ IPADBG("transport_mem_size = 0x%x\n",
+ ipa_drv_res->transport_mem_size);
+
+ IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
+ ipa_drv_res->emulator_intcntrlr_mem_base);
+ IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
+ ipa_drv_res->emulator_intcntrlr_mem_size);
+
+ result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
+ if (result != 0) {
+ IPAERR("ipa3_bind_api_controller() failed\n");
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return result;
+ }
+
+ bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+ IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return -EOPNOTSUPP;
+ }
+
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+ IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return -EOPNOTSUPP;
+ }
+
+ pci_set_master(pci_dev);
+
+ memset(&platform_dev, 0, sizeof(platform_dev));
+ platform_dev.dev = *dev;
+
+ /* Proceed to real initialization */
+ result = ipa3_pre_init(&ipa3_res, &platform_dev);
+ if (result) {
+ IPAERR("ipa3_init failed\n");
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, 0);
+ pci_disable_device(pci_dev);
+ return result;
+ }
+
+ return result;
+}
+
+/*
+ * The following returns transport register memory location and
+ * size...
+ */
+int ipa3_get_transport_info(
+ phys_addr_t *phys_addr_ptr,
+ unsigned long *size_ptr)
+{
+ if (!phys_addr_ptr || !size_ptr) {
+ IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
+ phys_addr_ptr, size_ptr);
+ return -EINVAL;
+ }
+
+ *phys_addr_ptr = ipa3_res.transport_mem_base;
+ *size_ptr = ipa3_res.transport_mem_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa3_get_transport_info);
+
+static uint emulation_type = IPA_HW_v4_0;
+
+/*
+ * The following returns emulation type...
+ */
+uint ipa3_get_emulation_type(void)
+{
+ return emulation_type;
+}
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");
+
+/*
+ * Module parameter. Invoke as follows:
+ * insmod ipat.ko emulation_type=[13|14|...|N]
+ * Examples:
+ * insmod ipat.ko emulation_type=13 # for IPA 3.5.1
+ * insmod ipat.ko emulation_type=14 # for IPA 4.0
+ *
+ * NOTE: The emulation_type values need to come from: enum ipa_hw_type
+ *
+ */
+
+module_param(emulation_type, uint, 0000);
+MODULE_PARM_DESC(
+ emulation_type,
+ "IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index fe39440..5bcd49e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -635,6 +635,69 @@
return 0;
}
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool is_lock),
+ u32 ipa_ep_idx)
+{
+ struct ipa3_ep_context *ep;
+
+ IPADBG("entry\n");
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (!ep->valid) {
+ IPAERR("Invalid EP\n");
+ return;
+ }
+
+ if (client_cb == NULL) {
+ IPAERR("Bad Param");
+ return;
+ }
+
+ ep->client_lock_unlock = client_cb;
+ IPADBG("exit\n");
+}
+
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx)
+{
+ struct ipa3_ep_context *ep;
+
+ IPADBG("entry\n");
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (!ep->valid) {
+ IPAERR("Invalid EP\n");
+ return;
+ }
+
+ if (ep->client_lock_unlock == NULL) {
+ IPAERR("client_lock_unlock is already NULL");
+ return;
+ }
+
+ ep->client_lock_unlock = NULL;
+ IPADBG("exit\n");
+}
+
+static void client_lock_unlock_cb(u32 ipa_ep_idx, bool is_lock)
+{
+ struct ipa3_ep_context *ep;
+
+ IPADBG("entry\n");
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+ if (!ep->valid) {
+ IPAERR("Invalid EP\n");
+ return;
+ }
+
+ if (ep->client_lock_unlock)
+ ep->client_lock_unlock(is_lock);
+
+ IPADBG("exit\n");
+}
int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
struct ipa_req_chan_out_params *out_params)
@@ -1259,6 +1322,46 @@
return result;
}
+/*
+ * Set USB PROD pipe delay for MBIM/RMNET config
+ * Clocks, should be voted before calling this API
+ * locks should be taken before calling this API
+ */
+
+void ipa3_set_usb_prod_pipe_delay(void)
+{
+ int result;
+ int pipe_idx;
+ struct ipa3_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_ctrl;
+
+ memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_ctrl.ipa_ep_delay = true;
+
+
+ pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+
+ if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+ IPAERR("client (%d) not valid\n", IPA_CLIENT_USB_PROD);
+ return;
+ }
+
+ ep = &ipa3_ctx->ep[pipe_idx];
+
+ /* Setting delay on USB_PROD with skip_ep_cfg */
+ client_lock_unlock_cb(pipe_idx, true);
+ if (ep->valid && ep->skip_ep_cfg) {
+ ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
+ result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed result=%d\n",
+ pipe_idx, result);
+ else
+ IPADBG("client (ep: %d) success\n", pipe_idx);
+ }
+ client_lock_unlock_cb(pipe_idx, false);
+}
+
void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
{
struct ipa3_ep_context *ep;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index dc5f5e0..4751c75 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -2291,6 +2291,13 @@
goto fail;
}
+ file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE,
+ dent, &ipa3_ctx->curr_ipa_clk_rate);
+ if (!file) {
+ IPAERR("could not create clk_rate file\n");
+ goto fail;
+ }
+
ipa_debugfs_init_stats(dent);
return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index e73349a..84124ab 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1110,11 +1110,6 @@
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
- if (ep->napi_enabled) {
- do {
- usleep_range(95, 105);
- } while (atomic_read(&ep->sys->curr_polling_state));
- }
if (IPA_CLIENT_IS_PROD(ep->client)) {
do {
@@ -1128,9 +1123,6 @@
} while (1);
}
- if (IPA_CLIENT_IS_CONS(ep->client))
- cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
- flush_workqueue(ep->sys->wq);
/* channel stop might fail on timeout if IPA is busy */
for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1138,7 +1130,7 @@
break;
if (result != -GSI_STATUS_AGAIN &&
- result != -GSI_STATUS_TIMED_OUT)
+ result != -GSI_STATUS_TIMED_OUT)
break;
}
@@ -1147,6 +1139,17 @@
ipa_assert();
return result;
}
+
+ if (ep->napi_enabled) {
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+ flush_workqueue(ep->sys->wq);
+
result = ipa3_reset_gsi_channel(clnt_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("Failed to reset chan: %d.\n", result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
new file mode 100644
index 0000000..5fe2294
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dtsi_replacement.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/msm_ipa.h>
+#include "ipa_i.h"
+#include "ipa_emulation_stubs.h"
+
+# undef strsame
+# define strsame(x, y) \
+ (!strcmp((x), (y)))
+
+/*
+ * The following enum values used to index tables below.
+ */
+enum dtsi_index_e {
+ DTSI_INDEX_3_5_1 = 0,
+ DTSI_INDEX_4_0 = 1,
+};
+
+struct dtsi_replacement_u32 {
+ char *key;
+ u32 value;
+};
+
+struct dtsi_replacement_u32_table {
+ struct dtsi_replacement_u32 *p_table;
+ u32 num_entries;
+};
+
+struct dtsi_replacement_bool {
+ char *key;
+ bool value;
+};
+
+struct dtsi_replacement_bool_table {
+ struct dtsi_replacement_bool *p_table;
+ u32 num_entries;
+};
+
+struct dtsi_replacement_u32_array {
+ char *key;
+ u32 *p_value;
+ u32 num_elements;
+};
+
+struct dtsi_replacement_u32_array_table {
+ struct dtsi_replacement_u32_array *p_table;
+ u32 num_entries;
+};
+
+struct dtsi_replacement_resource_table {
+ struct resource *p_table;
+ u32 num_entries;
+};
+
+/*
+ * Any of the data below with _4_0 in the name represent data taken
+ * from the 4.0 dtsi file.
+ *
+ * Any of the data below with _3_5_1 in the name represent data taken
+ * from the 3.5.1 dtsi file.
+ */
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
+ {"qcom,use-ipa-tethering-bridge", true},
+ {"qcom,modem-cfg-emb-pipe-flt", true},
+ {"qcom,ipa-wdi2", true},
+ {"qcom,use-64-bit-dma-mask", false},
+ {"qcom,bandwidth-vote-for-ipa", false},
+ {"qcom,skip-uc-pipe-reset", false},
+ {"qcom,tethered-flow-control", true},
+ {"qcom,use-rg10-limitation-mitigation", false},
+ {"qcom,do-not-use-ch-gsi-20", false},
+ {"qcom,use-ipa-pm", false},
+};
+
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
+ {"qcom,use-ipa-tethering-bridge", true},
+ {"qcom,modem-cfg-emb-pipe-flt", true},
+ {"qcom,ipa-wdi2", true},
+ {"qcom,use-64-bit-dma-mask", false},
+ {"qcom,bandwidth-vote-for-ipa", true},
+ {"qcom,skip-uc-pipe-reset", false},
+ {"qcom,tethered-flow-control", false},
+ {"qcom,use-rg10-limitation-mitigation", false},
+ {"qcom,do-not-use-ch-gsi-20", false},
+ {"qcom,use-ipa-pm", false},
+};
+
+static struct dtsi_replacement_bool_table
+ipa3_plat_drv_bool_table[] = {
+ { ipa3_plat_drv_bool_3_5_1,
+ ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) },
+ { ipa3_plat_drv_bool_4_0,
+ ARRAY_SIZE(ipa3_plat_drv_bool_4_0) },
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
+ {"qcom,ipa-hw-ver", IPA_HW_v4_0},
+ {"qcom,ipa-hw-mode", 3},
+ {"qcom,wan-rx-ring-size", 192},
+ {"qcom,lan-rx-ring-size", 192},
+ {"qcom,ee", 0},
+ {"emulator-bar0-offset", 0x01C00000},
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
+ {"qcom,ipa-hw-ver", IPA_HW_v3_5_1},
+ {"qcom,ipa-hw-mode", 3},
+ {"qcom,wan-rx-ring-size", 192},
+ {"qcom,lan-rx-ring-size", 192},
+ {"qcom,ee", 0},
+ {"emulator-bar0-offset", 0x01C00000},
+};
+
+static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
+ { ipa3_plat_drv_u32_3_5_1,
+ ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) },
+ { ipa3_plat_drv_u32_4_0,
+ ARRAY_SIZE(ipa3_plat_drv_u32_4_0) },
+};
+
+static u32 mhi_event_ring_id_limits_array_4_0[] = {
+ 9, 10
+};
+
+static u32 mhi_event_ring_id_limits_array_3_5_1[] = {
+ IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END
+};
+
+static u32 ipa_tz_unlock_reg_array_4_0[] = {
+ 0x04043583c, 0x00001000
+};
+
+static u32 ipa_tz_unlock_reg_array_3_5_1[] = {
+ 0x04043583c, 0x00001000
+};
+
+static u32 ipa_ram_mmap_array_4_0[] = {
+ 0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+ 0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+ 0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+ 0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+ 0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+ 0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+ 0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+ 0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+ 0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+ 0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x000013F0,
+ 0x0000100C, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+ 0x000023FC, 0x00000000, 0x000023FC, 0x00000000, 0x00000080,
+ 0x00000200, 0x00002800, 0x000023FC, 0x00000000, 0x000023FC,
+ 0x00000000, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000,
+ 0x00002400, 0x00000400, 0x00000BD8, 0x00000050, 0x00000C30,
+ 0x00000060, 0x00000C90, 0x00000140, 0x00000DD0, 0x00000180,
+ 0x00000F50, 0x00000180, 0x000010D0, 0x00000180, 0x00001250,
+ 0x00000180, 0x000013D0, 0x00000020
+};
+
+static u32 ipa_ram_mmap_array_3_5_1[] = {
+ 0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078,
+ 0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388,
+ 0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000,
+ 0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E,
+ 0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078,
+ 0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008,
+ 0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608,
+ 0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8,
+ 0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0,
+ 0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x00000BD8,
+ 0x00001024, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+ 0x00002000, 0x00000000, 0x00002000, 0x00000000, 0x00000080,
+ 0x00000200, 0x00002000, 0x00002000, 0x00000000, 0x00002000,
+ 0x00000000, 0x00002000, 0x00000000, 0x00002000, 0x00000000,
+ 0x00001C00, 0x00000400
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = {
+ {"qcom,mhi-event-ring-id-limits",
+ mhi_event_ring_id_limits_array_4_0,
+ ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) },
+ {"qcom,ipa-tz-unlock-reg",
+ ipa_tz_unlock_reg_array_4_0,
+ ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) },
+ {"qcom,ipa-ram-mmap",
+ ipa_ram_mmap_array_4_0,
+ ARRAY_SIZE(ipa_ram_mmap_array_4_0) },
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = {
+ {"qcom,mhi-event-ring-id-limits",
+ mhi_event_ring_id_limits_array_3_5_1,
+ ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) },
+ {"qcom,ipa-tz-unlock-reg",
+ ipa_tz_unlock_reg_array_3_5_1,
+ ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) },
+ {"qcom,ipa-ram-mmap",
+ ipa_ram_mmap_array_3_5_1,
+ ARRAY_SIZE(ipa_ram_mmap_array_3_5_1) },
+};
+
+struct dtsi_replacement_u32_array_table
+ipa3_plat_drv_u32_array_table[] = {
+ { ipa3_plat_drv_u32_array_3_5_1,
+ ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) },
+ { ipa3_plat_drv_u32_array_4_0,
+ ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) },
+};
+
+#define INTCTRL_OFFSET 0x083C0000
+#define INTCTRL_SIZE 0x00000110
+
+#define IPA_BASE_OFFSET_4_0 0x01e00000
+#define IPA_BASE_SIZE_4_0 0x00034000
+#define GSI_BASE_OFFSET_4_0 0x01e04000
+#define GSI_BASE_SIZE_4_0 0x00028000
+
+struct resource ipa3_plat_drv_resource_4_0[] = {
+ /*
+ * PLEASE NOTE WELL: The following offset values below
+ * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+ * calculate offsets relative to the PCI BAR0 address provided
+ * by the PCI probe. After their use to calculate the
+ * offsets, they are not used again, since PCI ultimately
+ * dictates where things live.
+ */
+ {
+ IPA_BASE_OFFSET_4_0,
+ (IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0),
+ "ipa-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ GSI_BASE_OFFSET_4_0,
+ (GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0),
+ "gsi-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ /*
+ * The following entry is germane only to the emulator
+ * environment. It is needed to locate the emulator's PCI
+ * interrupt controller...
+ */
+ {
+ INTCTRL_OFFSET,
+ (INTCTRL_OFFSET + INTCTRL_SIZE),
+ "intctrl-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ IPA_PIPE_MEM_START_OFST,
+ (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+ "ipa-pipe-mem",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ 0,
+ 0,
+ "gsi-irq",
+ IORESOURCE_IRQ,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ 0,
+ 0,
+ "ipa-irq",
+ IORESOURCE_IRQ,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+};
+
+#define IPA_BASE_OFFSET_3_5_1 0x01e00000
+#define IPA_BASE_SIZE_3_5_1 0x00034000
+#define GSI_BASE_OFFSET_3_5_1 0x01e04000
+#define GSI_BASE_SIZE_3_5_1 0x0002c000
+
+struct resource ipa3_plat_drv_resource_3_5_1[] = {
+ /*
+ * PLEASE NOTE WELL: The following offset values below
+ * ("ipa-base", "gsi-base", and "intctrl-base") are used to
+ * calculate offsets relative to the PCI BAR0 address provided
+ * by the PCI probe. After their use to calculate the
+ * offsets, they are not used again, since PCI ultimately
+ * dictates where things live.
+ */
+ {
+ IPA_BASE_OFFSET_3_5_1,
+ (IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1),
+ "ipa-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ GSI_BASE_OFFSET_3_5_1,
+ (GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1),
+ "gsi-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ /*
+ * The following entry is germane only to the emulator
+ * environment. It is needed to locate the emulator's PCI
+ * interrupt controller...
+ */
+ {
+ INTCTRL_OFFSET,
+ (INTCTRL_OFFSET + INTCTRL_SIZE),
+ "intctrl-base",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ IPA_PIPE_MEM_START_OFST,
+ (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+ "ipa-pipe-mem",
+ IORESOURCE_MEM,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ 0,
+ 0,
+ "gsi-irq",
+ IORESOURCE_IRQ,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+
+ {
+ 0,
+ 0,
+ "ipa-irq",
+ IORESOURCE_IRQ,
+ 0,
+ NULL,
+ NULL,
+ NULL
+ },
+};
+
+struct dtsi_replacement_resource_table
+ipa3_plat_drv_resource_table[] = {
+ { ipa3_plat_drv_resource_3_5_1,
+ ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) },
+ { ipa3_plat_drv_resource_4_0,
+ ARRAY_SIZE(ipa3_plat_drv_resource_4_0) },
+};
+
+/*
+ * The following code uses the data above...
+ */
+static u32 emulator_type_to_index(void)
+{
+ /*
+ * Use the input parameter to the IPA driver loadable module,
+ * which specifies the type of hardware the driver is running
+ * on.
+ */
+ u32 index = DTSI_INDEX_4_0;
+ uint emulation_type = ipa3_get_emulation_type();
+
+ switch (emulation_type) {
+ case IPA_HW_v3_5_1:
+ index = DTSI_INDEX_3_5_1;
+ break;
+ case IPA_HW_v4_0:
+ index = DTSI_INDEX_4_0;
+ break;
+ default:
+ break;
+ }
+
+ IPADBG("emulation_type(%u) emulation_index(%u)\n",
+ emulation_type, index);
+
+ return index;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_bool - Find from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists false otherwise.
+ */
+bool emulator_of_property_read_bool(
+ const struct device_node *np,
+ const char *propname)
+{
+ u16 i;
+ u32 index;
+ struct dtsi_replacement_bool *ipa3_plat_drv_boolP;
+
+ /*
+ * Get the index for the type of hardware we're running on.
+ * This is used as a table index.
+ */
+ index = emulator_type_to_index();
+ if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) {
+ IPADBG(
+ "Did not find ipa3_plat_drv_bool_table for index %u\n",
+ index);
+ return false;
+ }
+
+ ipa3_plat_drv_boolP =
+ ipa3_plat_drv_bool_table[index].p_table;
+
+ for (i = 0;
+ i < ipa3_plat_drv_bool_table[index].num_entries;
+ i++) {
+ if (strsame(ipa3_plat_drv_boolP[i].key, propname)) {
+ IPADBG(
+ "Found value %u for propname %s index %u\n",
+ ipa3_plat_drv_boolP[i].value,
+ propname,
+ index);
+ return ipa3_plat_drv_boolP[i].value;
+ }
+ }
+
+ IPADBG("Did not find match for propname %s index %u\n",
+ propname,
+ index);
+
+ return false;
+}
+
+/* From include/linux/of.h */
+int emulator_of_property_read_u32(
+ const struct device_node *np,
+ const char *propname,
+ u32 *out_value)
+{
+ u16 i;
+ u32 index;
+ struct dtsi_replacement_u32 *ipa3_plat_drv_u32P;
+
+ /*
+ * Get the index for the type of hardware we're running on.
+ * This is used as a table index.
+ */
+ index = emulator_type_to_index();
+ if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) {
+ IPADBG(
+ "Did not find ipa3_plat_drv_u32_table for index %u\n",
+ index);
+ return -EINVAL; /* missing table is an error, not success */
+ }
+
+ ipa3_plat_drv_u32P =
+ ipa3_plat_drv_u32_table[index].p_table;
+
+ for (i = 0;
+ i < ipa3_plat_drv_u32_table[index].num_entries;
+ i++) {
+ if (strsame(ipa3_plat_drv_u32P[i].key, propname)) {
+ *out_value = ipa3_plat_drv_u32P[i].value;
+ IPADBG(
+ "Found value %u for propname %s index %u\n",
+ ipa3_plat_drv_u32P[i].value,
+ propname,
+ index);
+ return 0;
+ }
+ }
+
+ IPADBG("Did not find match for propname %s index %u\n",
+ propname,
+ index);
+
+ return -EINVAL;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32_array *u32_arrayP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+		    index);
+		/*
+		 * Fix: this path used to "return false" (ie. 0), which a
+		 * caller would interpret as success even though out_values
+		 * was never written. Match the documented -EINVAL contract.
+		 */
+		return -EINVAL;
+	}
+
+	u32_arrayP =
+		ipa3_plat_drv_u32_array_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+	     i++) {
+		if (strsame(
+			u32_arrayP[i].key, propname)) {
+			u32 num_elements =
+				u32_arrayP[i].num_elements;
+			u32 *p_element =
+				&u32_arrayP[i].p_value[0];
+			size_t j = 0;
+
+			/* Never write past the caller's buffer. */
+			if (num_elements > sz) {
+				IPAERR(
+				    "Found array of %u values for propname %s; only room for %u elements in copy buffer\n",
+				    num_elements,
+				    propname,
+				    (unsigned int) sz);
+				return -EOVERFLOW;
+			}
+
+			while (j++ < num_elements)
+				*out_values++ = *p_element++;
+
+			IPADBG(
+			    "Found array of values starting with %u for propname %s index %u\n",
+			    u32_arrayP[i].p_value[0],
+			    propname,
+			    index);
+
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From drivers/base/platform.c */
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device (unused; lookup goes through the emulator
+ *       dtsi replacement tables)
+ * @type: resource type
+ * @name: resource name
+ *
+ * Returns a pointer to the matching table-resident resource, or NULL
+ * if no resource of the given type and name exists for this hardware.
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name)
+{
+	u16 i;
+	u32 index;
+	struct resource *ipa3_plat_drv_resourceP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_resource_table for index %u\n",
+		    index);
+		/*
+		 * Fix: this path used to "return false" from a
+		 * pointer-returning function; return NULL explicitly,
+		 * as the not-found path below does.
+		 */
+		return NULL;
+	}
+
+	ipa3_plat_drv_resourceP =
+		ipa3_plat_drv_resource_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_resource_table[index].num_entries;
+	     i++) {
+		struct resource *r = &ipa3_plat_drv_resourceP[i];
+
+		if (type == resource_type(r) && strsame(r->name, name)) {
+			IPADBG(
+			    "Found start 0x%x size %u for name %s index %u\n",
+			    (unsigned int) (r->start),
+			    (unsigned int) (resource_size(r)),
+			    name,
+			    index);
+			return r;
+		}
+	}
+
+	IPADBG("Did not find match for name %s index %u\n",
+	       name,
+	       index);
+
+	return NULL;
+}
+
+/* From drivers/of/base.c */
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np: device node from which the property value is to
+ * be read. Not used.
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size)
+{
+	u32 index;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+
+	/*
+	 * Use elem_size to determine which table to search for the
+	 * specified property name
+	 */
+	if (elem_size == sizeof(u32)) {
+		u16 i;
+		struct dtsi_replacement_u32_array *u32_arrayP;
+
+		if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+			IPADBG(
+			    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+			    index);
+			/*
+			 * Fix: this path used to "return false" (ie. 0),
+			 * which a caller would read as "zero elements
+			 * found" rather than the documented -EINVAL error.
+			 */
+			return -EINVAL;
+		}
+
+		u32_arrayP =
+			ipa3_plat_drv_u32_array_table[index].p_table;
+
+		for (i = 0;
+		     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+		     i++) {
+			if (strsame(u32_arrayP[i].key, propname)) {
+				if (u32_arrayP[i].p_value == NULL) {
+					IPADBG(
+					    "Found no elements for propname %s index %u\n",
+					    propname,
+					    index);
+					return -ENODATA;
+				}
+
+				IPADBG(
+				    "Found %u elements for propname %s index %u\n",
+				    u32_arrayP[i].num_elements,
+				    propname,
+				    index);
+
+				return u32_arrayP[i].num_elements;
+			}
+		}
+
+		IPADBG(
+		    "Found no match in table with elem_size %d for propname %s index %u\n",
+		    elem_size,
+		    propname,
+		    index);
+
+		return -EINVAL;
+	}
+
+	IPAERR(
+	    "Found no tables with element size %u to search for propname %s index %u\n",
+	    elem_size,
+	    propname,
+	    index);
+
+	return -EINVAL;
+}
+
+/* From drivers/of/base.c */
+/**
+ * emulator_of_property_read_variable_u32_array - Find and read an
+ * array of 32 bit integers from a property, bounded by sz_min/sz_max.
+ *
+ * @np: device node from which the property value is to be read (unused).
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to the buffer receiving the values.
+ * @sz_min: minimum number of array elements to read (not enforced here).
+ * @sz_max: maximum number of array elements to read.
+ *
+ * Thin delegate: reads up to sz_max elements via
+ * emulator_of_property_read_u32_array(); sz_min is ignored.
+ *
+ * NOTE(review): the kernel's of_property_read_variable_u32_array()
+ * returns the number of elements read on success, whereas this stub
+ * returns 0 -- confirm callers only test for a negative error code.
+ */
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max)
+{
+	return emulator_of_property_read_u32_array(
+		np, propname, out_values, sz_max);
+}
+
+/**
+ * emulator_resource_size - size in bytes of a table-resident resource
+ * @res: resource whose [start, end] range is measured
+ *
+ * struct resource uses an *inclusive* end address, so the size is
+ * end - start + 1, matching the kernel's resource_size() in
+ * include/linux/ioport.h. The previous "end - start" under-counted by
+ * one byte. (If the emulator tables were populated with exclusive end
+ * addresses instead, they should be fixed to the kernel convention.)
+ */
+resource_size_t emulator_resource_size(const struct resource *res)
+{
+	return res->end - res->start + 1;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
new file mode 100644
index 0000000..cf4c7c9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_IPA_EMULATION_STUBS_H_)
+# define _IPA_EMULATION_STUBS_H_
+
+# define clk_get(x, y) ((struct clk *) -(MAX_ERRNO+1))
+# define clk_put(x) do { } while (0)
+# define clk_prepare(x) do { } while (0)
+# define clk_enable(x) do { } while (0)
+# define clk_set_rate(x, y) do { } while (0)
+# define clk_disable_unprepare(x) do { } while (0)
+
+# define outer_flush_range(x, y)
+# define __flush_dcache_area(x, y)
+# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y)
+
+/* Point several API calls to these new EMULATION functions */
+# define of_property_read_bool(np, propname) \
+ emulator_of_property_read_bool(NULL, propname)
+# define of_property_read_u32(np, propname, out_value) \
+ emulator_of_property_read_u32(NULL, propname, out_value)
+# define of_property_read_u32_array(np, propname, out_values, sz) \
+ emulator_of_property_read_u32_array(NULL, propname, out_values, sz)
+# define platform_get_resource_byname(dev, type, name) \
+ emulator_platform_get_resource_byname(NULL, type, name)
+# define of_property_count_elems_of_size(np, propname, elem_size) \
+ emulator_of_property_count_elems_of_size(NULL, propname, elem_size)
+# define of_property_read_variable_u32_array( \
+ np, propname, out_values, sz_min, sz_max) \
+ emulator_of_property_read_variable_u32_array( \
+ NULL, propname, out_values, sz_min, sz_max)
+# define resource_size(res) \
+ emulator_resource_size(res)
+
+/**
+ * emulator_of_property_read_bool - Find a property
+ * @np: device node used to find the property value. (not used)
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists false otherwise.
+ */
+bool emulator_of_property_read_bool(
+ const struct device_node *np,
+ const char *propname);
+
+int emulator_of_property_read_u32(
+ const struct device_node *np,
+ const char *propname,
+ u32 *out_value);
+
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np: device node used to find the property value. (not used)
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+ const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz);
+
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device
+ * by name
+ *
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+ struct platform_device *dev,
+ unsigned int type,
+ const char *name);
+
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np: device node used to find the property value. (not used)
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+ const struct device_node *np,
+ const char *propname,
+ int elem_size);
+
+int emulator_of_property_read_variable_u32_array(
+ const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz_min,
+ size_t sz_max);
+
+resource_size_t emulator_resource_size(
+ const struct resource *res);
+
+#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 6742773..6703bf5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1447,8 +1447,16 @@
}
}
}
- mutex_unlock(&ipa3_ctx->lock);
+ /* commit the change to IPA-HW */
+ if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
+ ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
+ IPAERR("fail to commit flt-rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EPERM;
+ }
+ mutex_unlock(&ipa3_ctx->lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index cecbef0..4196539 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -1171,8 +1171,16 @@
ipa3_ctx->hdr_proc_ctx_tbl.end = end;
IPADBG("hdr_proc_tbl.end = %d\n", end);
}
- mutex_unlock(&ipa3_ctx->lock);
+ /* commit the change to IPA-HW */
+ if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+ IPAERR("fail to commit hdr\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&ipa3_ctx->lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 56b5740..268f5fa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -203,6 +203,188 @@
#define IPA_WDI_TX_DB_RES 7
#define IPA_WDI_MAX_RES 8
+#ifdef CONFIG_ARM64
+/* Outer caches unsupported on ARM64 platforms */
+# define outer_flush_range(x, y)
+# define __cpuc_flush_dcache_area __flush_dcache_area
+#endif
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+ x == IPA_MODE_MOBILE_AP_WAN || \
+ x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_MHI_GSI_EVENT_RING_ID_START 10
+#define IPA_MHI_GSI_EVENT_RING_ID_END 12
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
+#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
+#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
+
+/* round addresses to the closest page per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+ do { \
+ (iova_p) = rounddown((iova), PAGE_SIZE); \
+ (pa_p) = rounddown((pa), PAGE_SIZE); \
+ (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+ } while (0)
+
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+/*
+ * The following paths below are used when building the system for the
+ * emulation environment or when IPA_EMULATION_COMPILE == 1.
+ *
+ * As new hardware platforms are added into the emulation environment,
+ * please add the appropriate paths here for their firmwares.
+ */
+#define IPA_FWS_PATH_4_0 "ipa/4.0/ipa_fws.elf"
+#define IPA_FWS_PATH_3_5_1 "ipa/3.5.1/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_RT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_RT_TBL, \
+ compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_COPY_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+ compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_HDR, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_MEM, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_INIT_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_INIT_IPV6CT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_TABLE_DMA_CMD, \
+ compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_V4_DEL_NAT, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_NAT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_IPV6CT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_MODIFY_PDN, \
+ compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GET_NAT_OFFSET, \
+ compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_PULL_MSG, \
+ compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_ADD_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_RM_DEL_DEPENDENCY, \
+ compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_GENERATE_FLT_EQ, \
+ compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+ compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_WRITE_QMAPID, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_FLT_RULE, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+ compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+ compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_HDR_PROC_CTX, \
+ compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_MDFY_RT_RULE, \
+ compat_uptr_t)
+#endif /* #ifdef CONFIG_COMPAT */
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
struct ipa3_active_client_htable_entry {
struct hlist_node list;
char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
@@ -558,6 +740,8 @@
* @qmi_request_sent: Indicates whether QMI request to enable clear data path
* request is sent or not.
* @napi_enabled: when true, IPA call client callback to start polling
+ * @client_lock_unlock: callback function to take mutex lock/unlock for USB
+ * clients
*/
struct ipa3_ep_context {
int valid;
@@ -590,6 +774,8 @@
u32 eot_in_poll_err;
bool ep_delay_set;
+ int (*client_lock_unlock)(bool is_lock);
+
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
};
@@ -912,11 +1098,13 @@
* @IPA_HW_Normal: Regular IPA hardware
* @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation
* @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge
+ * @IPA_HW_Emulation: IPA emulation hardware
*/
enum ipa3_hw_mode {
- IPA_HW_MODE_NORMAL = 0,
- IPA_HW_MODE_VIRTUAL = 1,
- IPA_HW_MODE_PCIE = 2
+ IPA_HW_MODE_NORMAL = 0,
+ IPA_HW_MODE_VIRTUAL = 1,
+ IPA_HW_MODE_PCIE = 2,
+ IPA_HW_MODE_EMULATION = 3,
};
enum ipa3_config_this_ep {
@@ -1207,6 +1395,7 @@
* @mode: IPA operating mode
* @mmio: iomem
* @ipa_wrapper_base: IPA wrapper base address
+ * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base
* @hdr_tbl: IPA header table
* @hdr_proc_ctx_tbl: IPA processing context table
* @rt_tbl_set: list of routing tables each of which is a list of rules
@@ -1423,6 +1612,9 @@
u32 ipa_mem_size;
u32 transport_mem_base;
u32 transport_mem_size;
+ u32 emulator_intcntrlr_mem_base;
+ u32 emulator_intcntrlr_mem_size;
+ u32 emulator_irq;
u32 ipa_irq;
u32 transport_irq;
u32 ipa_pipe_mem_start_ofst;
@@ -1726,6 +1918,9 @@
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
void ipa3_xdci_ep_delay_rm(u32 clnt_hdl);
+void ipa3_register_lock_unlock_callback(int (*client_cb)(bool), u32 ipa_ep_idx);
+void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx);
+void ipa3_set_usb_prod_pipe_delay(void);
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
bool should_force_clear, u32 qmi_req_id, bool is_dpl);
@@ -2355,6 +2550,10 @@
void ipa3_inc_acquire_wakelock(void);
void ipa3_dec_release_wakelock(void);
int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base);
+int emulator_load_fws(
+ const struct firmware *firmware,
+ u32 transport_mem_base,
+ u32 transport_mem_size);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
int ipa_gsi_ch20_wa(void);
@@ -2381,4 +2580,9 @@
void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
struct ipahal_imm_cmd_pyld *cmd_pyld);
int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+uint ipa3_get_emulation_type(void);
+int ipa3_get_transport_info(
+ phys_addr_t *phys_addr_ptr,
+ unsigned long *size_ptr);
+irq_handler_t ipa3_get_isr(void);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index d69d6ae..46b7434 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,12 @@
ipa3_dec_client_disable_clks_no_block(&log_info);
return IRQ_HANDLED;
}
+
+irq_handler_t ipa3_get_isr(void)
+{
+ return ipa3_isr;
+}
+
/**
* ipa3_add_interrupt_handler() - Adds handler to an interrupt type
* @interrupt: Interrupt type
@@ -486,21 +492,35 @@
return -ENOMEM;
}
- res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
- IRQF_TRIGGER_RISING, "ipa", ipa_dev);
- if (res) {
- IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
- return -ENODEV;
+ /*
+ * NOTE:
+ *
+ * We'll only register an isr on non-emulator (ie. real UE)
+ * systems.
+ *
+ * On the emulator, emulator_soft_irq_isr() will be calling
+ * ipa3_isr, so hence, no isr registration here, and instead,
+ * we'll pass the address of ipa3_isr to the gsi layer where
+ * emulator interrupts are handled...
+ */
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+ res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+ IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+ if (res) {
+ IPAERR(
+ "fail to register IPA IRQ handler irq=%d\n",
+ ipa_irq);
+ return -ENODEV;
+ }
+ IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+ res = enable_irq_wake(ipa_irq);
+ if (res)
+ IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+ ipa_irq, res);
+ else
+ IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
}
- IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
-
- res = enable_irq_wake(ipa_irq);
- if (res)
- IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
- ipa_irq, res);
- else
- IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
-
spin_lock_init(&suspend_wa_lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index b48f2c4..7065e2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1258,6 +1258,11 @@
goto bail;
}
+ if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+ IPAERR_RL("NAT hasn't been initialized\n");
+ return -EPERM;
+ }
+
for (cnt = 0; cnt < dma->entries; ++cnt) {
result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]);
if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index 0772dde..47a03f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -16,7 +16,7 @@
#include <linux/msm_ipa.h>
/* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 12 /* actual max is value -1 since we start from 1*/
+#define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/
#define IPA_PM_MAX_EX_CL 64
#define IPA_PM_THRESHOLD_MAX 5
#define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3351a33..3210a70 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -399,7 +399,7 @@
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
-static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+{
+	/*
+	 * Stub for builds without QMI support: give it a body like the
+	 * sibling stubs (e.g. ipa3_q6_handshake_complete above). A bare
+	 * "static inline ...;" declaration has no definition, so any
+	 * caller in this configuration would fail to link.
+	 */
+	return -EPERM;
+}
static inline int ipa3_qmi_enable_per_client_stats(
struct ipa_enable_per_client_stats_req_msg_v01 *req,
struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 7861896..736c0fb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1577,6 +1577,15 @@
}
}
}
+
+ /* commit the change to IPA-HW */
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v4) ||
+ ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v6)) {
+ IPAERR("fail to commit rt-rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ mutex_unlock(&ipa3_ctx->lock);
+ return -EPERM;
+ }
mutex_unlock(&ipa3_ctx->lock);
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5d6d3cd..61c81cd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -24,6 +24,13 @@
#include "ipahal/ipahal_hw_stats.h"
#include "../ipa_rm_i.h"
+/*
+ * The following for adding code (ie. for EMULATION) not found on x86.
+ */
+#if IPA_EMULATION_COMPILE == 1
+# include "ipa_emulation_stubs.h"
+#endif
+
#define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
@@ -964,12 +971,12 @@
/* IPA_3_5_1 */
[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 7, 1, 8, 16, IPA_EE_UC } },
[IPA_3_5_1][IPA_CLIENT_USB_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
@@ -979,7 +986,7 @@
{ 8, 7, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 2, 3, 16, 32, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = {
@@ -1984,10 +1991,13 @@
max_writes.qmb_1_max_writes = 2;
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
- max_writes.qmb_1_max_writes = 4;
max_reads.qmb_1_max_reads = 12;
+ max_writes.qmb_1_max_writes = 4;
}
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ max_reads.qmb_0_max_reads = 12;
+
ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
}
@@ -2322,6 +2332,32 @@
}
/**
+ * ipa3_get_client_by_pipe() - return client type relative to pipe
+ * index
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client type, or IPA_CLIENT_MAX when no valid entry in
+ * the ep mapping for the current HW type uses @pipe_idx -- callers
+ * must treat IPA_CLIENT_MAX as "not found".
+ */
+static enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
+{
+	int j = 0;
+
+	/* Scan every client slot for this HW type looking for a valid
+	 * entry whose GSI ep number matches the requested pipe.
+	 */
+	for (j = 0; j < IPA_CLIENT_MAX; j++) {
+		const struct ipa_ep_configuration *iec_ptr =
+			&(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]);
+		if (iec_ptr->valid &&
+		    iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx)
+			break;
+	}
+
+	if (j == IPA_CLIENT_MAX)
+		IPADBG("ipa3_get_client_by_pipe(%d) can't find client\n",
+			pipe_idx);
+
+	return j;
+}
+
+/**
* ipa_init_ep_flt_bitmap() - Initialize the bitmap
* that represents the End-points that supports filtering
*/
@@ -4870,7 +4906,8 @@
}
/* move resource group configuration from HLOS to TZ */
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+ if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+ ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
return;
}
@@ -5179,6 +5216,77 @@
}
/**
+ * emulator_load_single_fw() - load firmware into emulator's memory
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @phdr: ELF program header
+ * @fw_base: memory location to which firmware should get loaded
+ * @offset_from_base: offset to start relative to fw_base
+ *
+ * Validates the ELF segment bounds against the firmware blob, zeroes
+ * the destination region, then copies the segment word by word.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+static int emulator_load_single_fw(
+	const struct firmware *firmware,
+	const struct elf32_phdr *phdr,
+	void __iomem *fw_base,
+	uint32_t offset_from_base)
+{
+	/*
+	 * NOTE(review): index is signed while p_memsz/sizeof(uint32_t)
+	 * is unsigned -- harmless for sane segment sizes, but a uint32_t
+	 * would avoid the sign-compare warning.
+	 */
+	int index;
+	uint32_t ofb;
+	const uint32_t *elf_data_ptr;
+
+	IPADBG("firmware(%pK) phdr(%pK) fw_base(%pK) offset_from_base(0x%x)\n",
+	       firmware, phdr, fw_base, offset_from_base);
+
+	/* Segment offset must lie inside the firmware blob... */
+	if (phdr->p_offset > firmware->size) {
+		IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+		       phdr->p_offset, firmware->size);
+		return -EINVAL;
+	}
+	/* ...and the segment's file data must fit in what remains. */
+	if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+		IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+		       phdr->p_offset, phdr->p_filesz, firmware->size);
+		return -EINVAL;
+	}
+
+	/* Copy loop below works in 32-bit words, so require alignment. */
+	if (phdr->p_memsz % sizeof(uint32_t)) {
+		IPAERR("FW mem size %u doesn't align to 32bit\n",
+		       phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		IPAERR("FW image too big src_size=%u dst_size=%u\n",
+		       phdr->p_filesz, phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n",
+	       (uint32_t) phdr->p_memsz,
+	       (uint32_t) phdr->p_filesz,
+	       (uint32_t) (phdr->p_filesz/sizeof(uint32_t)));
+
+	/* Set the entire region to 0s */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) {
+		writel_relaxed(0, fw_base + ofb);
+		ofb += sizeof(uint32_t);
+	}
+
+	elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+	/*
+	 * Write the FW word by word; relaxed MMIO writes are used with
+	 * no trailing barrier -- presumably the caller/emulator does not
+	 * need ordering here, TODO confirm.
+	 */
+	ofb = offset_from_base;
+	for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+		writel_relaxed(*elf_data_ptr, fw_base + ofb);
+		elf_data_ptr++;
+		ofb += sizeof(uint32_t);
+	}
+
+	return 0;
+}
+
+/**
* ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
*
* @firmware: Structure which contains the FW data from the user space.
@@ -5286,6 +5394,240 @@
return 0;
}
+/*
+ * The following needed for the EMULATION system. On a non-emulation
+ * system (i.e. the real UE), this functionality is done in the
+ * TZ...
+ */
+#define IPA_SPARE_REG_1_VAL (0xC0000805)
+
+static void ipa_gsi_setup_reg(void)
+{
+	u32 reg_val, start;
+	int i;
+	const struct ipa_gsi_ep_config *gsi_ep_info_cfg;
+	enum ipa_client_type type;
+
+	IPADBG("Setting up registers in preparation for firmware download\n");
+
+	/* enable GSI interface */
+	ipahal_write_reg(IPA_GSI_CONF, 1);
+
+	/*
+	 * Before configuring the FIFOs need to unset bit 30 in the
+	 * spare register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1_OFST,
+		(IPA_SPARE_REG_1_VAL & (~(1 << 30))));
+
+	/* setup IPA_ENDP_GSI_CFG_TLV_n reg: pack TLV FIFOs back to back */
+	start = 0;
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		IPADBG("for ep %d client is %d\n", i, type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		IPADBG("Config is true\n");
+		reg_val = (gsi_ep_info_cfg->ipa_if_tlv << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_tlv;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_OFST_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG_AOS_n reg: pack AOS FIFOs back to back */
+	start = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (gsi_ep_info_cfg->ipa_if_aos << 16) + start;
+		start += gsi_ep_info_cfg->ipa_if_aos;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_OFST_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG1_n reg: GSI channel number and EE per ep */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (1 << 16) +
+			((u32)gsi_ep_info_cfg->ipa_gsi_chan_num << 8) +
+			gsi_ep_info_cfg->ee;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_OFST_n, i, reg_val);
+	}
+
+	/*
+	 * Setup IPA_ENDP_GSI_CFG2_n reg: this register must be setup
+	 * as last one; pulse bit 31 high then low for each pipe
+	 */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = 1U << 31;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_OFST_n, i, reg_val);
+		reg_val = 0;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_OFST_n, i, reg_val);
+	}
+
+	/*
+	 * After configuring the FIFOs need to set bit 30 in the spare
+	 * register
+	 */
+	ipahal_write_reg(IPA_SPARE_REG_1_OFST,
+		(IPA_SPARE_REG_1_VAL | (1 << 30)));
+}
+
+/**
+ * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @transport_mem_base: physical base of the GSI transport memory region
+ * @transport_mem_size: size in bytes of that region
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	void __iomem *gsi_base;
+	uint32_t hps_seq_offs, dps_seq_offs;
+	unsigned long gsi_offset;
+	int rc;
+
+	IPADBG("Loading firmware(%pK)\n", firmware);
+
+	if (!firmware) {
+		IPAERR("firmware pointer passed to function is NULL\n");
+		return -EINVAL;
+	}
+
+	/* One program header per FW image: GSI, DPS and HPS */
+	if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+		IPAERR(
+		    "Missing ELF and Program headers firmware size=%zu\n",
+		    firmware->size);
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	ipa_assert_on(!ehdr);
+
+	if (ehdr->e_phnum != 3) {
+		IPAERR("Unexpected number of ELF program headers\n");
+		return -EINVAL;
+	}
+
+	hps_seq_offs = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
+	dps_seq_offs = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
+
+	/*
+	 * Each ELF program header represents a FW image and contains:
+	 *  p_vaddr : The starting address to which the FW needs to be loaded.
+	 *  p_memsz : The size of the IRAM (where the image loaded)
+	 *  p_filesz: The size of the FW image embedded inside the ELF
+	 *  p_offset: Absolute offset to the image from the head of the ELF
+	 *
+	 * NOTE WELL: On the emulation platform, the p_vaddr address
+	 *            is not relevant and is unused.  This is because
+	 *            on the emulation platform, the registers'
+	 *            address location is mutable, since it's mapped
+	 *            in via a PCIe probe.  Given this, it is the
+	 *            mapped address info that's used while p_vaddr is
+	 *            ignored.
+	 */
+	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
+
+	phdr += 2;
+
+	/*
+	 * Attempt to load IPA HPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
+		IPAERR("Invalid IPA HPS img size memsz=%u hps_mem_size=%u\n",
+			phdr->p_memsz, ipahal_get_hps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading HPS FW\n");
+	rc = emulator_load_single_fw(
+		firmware, phdr, ipa3_ctx->mmio, hps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading HPS FW complete\n");
+
+	--phdr;
+
+	/*
+	 * Attempt to load IPA DPS FW image
+	 */
+	if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
+		IPAERR("Invalid IPA DPS img size memsz=%u dps_mem_size=%u\n",
+			phdr->p_memsz, ipahal_get_dps_img_mem_size());
+		return -EINVAL;
+	}
+	IPADBG("Loading DPS FW\n");
+	rc = emulator_load_single_fw(
+		firmware, phdr, ipa3_ctx->mmio, dps_seq_offs);
+	if (rc)
+		return rc;
+	IPADBG("Loading DPS FW complete\n");
+
+	/*
+	 * Run gsi register setup which is normally done in TZ on
+	 * non-EMULATION systems...
+	 */
+	ipa_gsi_setup_reg();
+
+	/*
+	 * Map to the GSI base...
+	 */
+	gsi_base = ioremap_nocache(transport_mem_base, transport_mem_size);
+
+	IPADBG("GSI base(0x%x) mapped to (%pK) with len (0x%x)\n",
+	       transport_mem_base,
+	       gsi_base,
+	       transport_mem_size);
+
+	if (!gsi_base) {
+		IPAERR("ioremap_nocache failed\n");
+		return -EFAULT;
+	}
+
+	--phdr;
+
+	/*
+	 * Attempt to load GSI FW image
+	 */
+	if (phdr->p_memsz > transport_mem_size) {
+		IPAERR("Invalid GSI FW img size memsz=%u transport_mem_size=%u\n",
+			phdr->p_memsz, transport_mem_size);
+		iounmap(gsi_base);
+		return -EINVAL;
+	}
+	IPADBG("Loading GSI FW\n");
+	gsi_get_inst_ram_offset_and_size(&gsi_offset, NULL);
+	rc = emulator_load_single_fw(
+		firmware, phdr, gsi_base, (uint32_t) gsi_offset);
+	iounmap(gsi_base);
+	if (rc)
+		return rc;
+	IPADBG("Loading GSI FW complete\n");
+
+	IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
+
+	return 0;
+}
+
/**
* ipa3_is_msm_device() - Is the running device a MSM or MDM?
* Determine according to IPA version
@@ -5379,4 +5721,3 @@
desc->len = cmd_pyld->len;
desc->type = IPA_IMM_CMD_DESC;
}
-
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 1254fe3..530adef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/errno.h>
#include <linux/ipc_logging.h>
#include <linux/debugfs.h>
#include <linux/ipa.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index ce59488..38132d2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -133,6 +133,12 @@
__stringify(IPA_FEC_ATTR_EE_n),
__stringify(IPA_MBIM_DEAGGR_FEC_ATTR_EE_n),
__stringify(IPA_GEN_DEAGGR_FEC_ATTR_EE_n),
+ __stringify(IPA_GSI_CONF),
+ __stringify(IPA_ENDP_GSI_CFG1_OFST_n),
+ __stringify(IPA_ENDP_GSI_CFG2_OFST_n),
+ __stringify(IPA_ENDP_GSI_CFG_AOS_OFST_n),
+ __stringify(IPA_ENDP_GSI_CFG_TLV_OFST_n),
+ __stringify(IPA_SPARE_REG_1_OFST),
};
static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -1971,6 +1977,24 @@
[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
ipareg_construct_hps_queue_weights,
ipareg_parse_hps_queue_weights, 0x000005a4, 0, 0, 0, 0},
+ [IPA_HW_v3_5][IPA_GSI_CONF] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002790, 0x0, 0, 0, 0 },
+ [IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_OFST_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002794, 0x4, 0, 0, 0 },
+ [IPA_HW_v3_5][IPA_ENDP_GSI_CFG2_OFST_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002A2C, 0x4, 0, 0, 0 },
+ [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_OFST_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000029A8, 0x4, 0, 0, 0 },
+ [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_OFST_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002924, 0x4, 0, 0, 0 },
+ [IPA_HW_v3_5][IPA_SPARE_REG_1_OFST] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00002780, 0x0, 0, 0, 0 },
/* IPAv4.0 */
[IPA_HW_v4_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 7e8e8ba..da5bbbf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -134,6 +134,12 @@
IPA_FEC_ATTR_EE_n,
IPA_MBIM_DEAGGR_FEC_ATTR_EE_n,
IPA_GEN_DEAGGR_FEC_ATTR_EE_n,
+ IPA_GSI_CONF,
+ IPA_ENDP_GSI_CFG1_OFST_n,
+ IPA_ENDP_GSI_CFG2_OFST_n,
+ IPA_ENDP_GSI_CFG_AOS_OFST_n,
+ IPA_ENDP_GSI_CFG_TLV_OFST_n,
+ IPA_SPARE_REG_1_OFST,
IPA_REG_MAX,
};
@@ -641,4 +647,3 @@
struct ipahal_reg_valmask *valmask);
#endif /* _IPAHAL_REG_H_ */
-
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index 82bee5d..e18e534 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,8 @@
+ifdef CONFIG_X86
+ccflags-y += -DIPA_EMULATION_COMPILE=1
+else
+ccflags-y += -DIPA_EMULATION_COMPILE=0
+endif
+
obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 212557c..7f65956 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -327,6 +327,8 @@
u32 prod_hdl;
u32 cons_hdl;
u32 test_prod_hdl;
+ phys_addr_t transport_phys_addr;
+ unsigned long transport_size;
};
static struct ipa_test_mhi_context *test_mhi_ctx;
@@ -780,11 +782,6 @@
IPA_UT_DBG("Start Setup\n");
- if (!gsi_ctx) {
- IPA_UT_ERR("No GSI ctx\n");
- return -EINVAL;
- }
-
if (!ipa3_ctx) {
IPA_UT_ERR("No IPA ctx\n");
return -EINVAL;
@@ -797,11 +794,20 @@
return -ENOMEM;
}
- test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
- gsi_ctx->per.size);
- if (!test_mhi_ctx) {
+ rc = ipa3_get_transport_info(&test_mhi_ctx->transport_phys_addr,
+ &test_mhi_ctx->transport_size);
+ if (rc != 0) {
+ IPA_UT_ERR("ipa3_get_transport_info() failed\n");
+ rc = -EFAULT;
+ goto fail_free_ctx;
+ }
+
+ test_mhi_ctx->gsi_mmio =
+ ioremap_nocache(test_mhi_ctx->transport_phys_addr,
+ test_mhi_ctx->transport_size);
+ if (!test_mhi_ctx->gsi_mmio) {
IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
- gsi_ctx->per.size);
+ test_mhi_ctx->transport_size);
rc = -EFAULT;
goto fail_free_ctx;
}
@@ -1385,7 +1391,7 @@
/* write value to event ring doorbell */
IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
p_events[event_ring_index].wp,
- &(gsi_ctx->per.phys_addr),
+ &(test_mhi_ctx->transport_phys_addr),
GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
iowrite32(p_events[event_ring_index].wp,
@@ -1432,7 +1438,7 @@
IPA_UT_LOG(
"DB to channel 0x%llx: base %pa ofst 0x%x\n"
, p_channels[channel_idx].wp
- , &(gsi_ctx->per.phys_addr)
+ , &(test_mhi_ctx->transport_phys_addr)
, GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
channel_idx, 0));
iowrite32(p_channels[channel_idx].wp,
@@ -3324,4 +3330,3 @@
ipa_mhi_test_in_loop_channel_reset_ipa_holb,
true, IPA_HW_v3_0, IPA_HW_MAX),
} IPA_UT_DEFINE_SUITE_END(mhi);
-
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index bcbcd87..dad3ec9 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -1036,9 +1036,10 @@
* If IPA driver already ready, continue initialization immediately.
* if not, wait for IPA ready notification by IPA driver context
*/
-static int __init ipa_ut_module_init(void)
+int __init ipa_ut_module_init(void)
{
- int ret;
+ int ret = 0;
+ bool init_framewok = true;
IPA_UT_INFO("Loading IPA test module...\n");
@@ -1050,14 +1051,34 @@
mutex_init(&ipa_ut_ctx->lock);
if (!ipa_is_ready()) {
+ init_framewok = false;
+
IPA_UT_DBG("IPA driver not ready, registering callback\n");
+
ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL);
/*
- * If we received -EEXIST, IPA has initialized. So we need
- * to continue the initing process.
+ * If the call to ipa_register_ipa_ready_cb() above
+ * returns 0, this means that we've succeeded in
+ * queuing up a future call to ipa_ut_framework_init()
+ * and that the call to it will be made once the IPA
+ * becomes ready. If this is the case, the call to
+ * ipa_ut_framework_init() below need not be made.
+ *
+ * If the call to ipa_register_ipa_ready_cb() above
+ * returns -EEXIST, it means that during the call to
+ * ipa_register_ipa_ready_cb(), the IPA has become
+ * ready, and hence, no indirect call to
+ * ipa_ut_framework_init() will be made, so we need to
+ * call it ourselves below.
+ *
+ * If the call to ipa_register_ipa_ready_cb() above
+ * return something other than 0 or -EEXIST, that's a
+ * hard error.
*/
- if (ret != -EEXIST) {
+ if (ret == -EEXIST) {
+ init_framewok = true;
+ } else {
if (ret) {
IPA_UT_ERR("IPA CB reg failed - %d\n", ret);
kfree(ipa_ut_ctx);
@@ -1067,12 +1088,15 @@
}
}
- ret = ipa_ut_framework_init();
- if (ret) {
- IPA_UT_ERR("framework init failed\n");
- kfree(ipa_ut_ctx);
- ipa_ut_ctx = NULL;
+ if (init_framewok) {
+ ret = ipa_ut_framework_init();
+ if (ret) {
+ IPA_UT_ERR("framework init failed\n");
+ kfree(ipa_ut_ctx);
+ ipa_ut_ctx = NULL;
+ }
}
+
return ret;
}
@@ -1081,7 +1105,7 @@
*
* Destroys the Framework and removes its context
*/
-static void ipa_ut_module_exit(void)
+void ipa_ut_module_exit(void)
{
IPA_UT_DBG("Entry\n");
@@ -1093,8 +1117,9 @@
ipa_ut_ctx = NULL;
}
+#if IPA_EMULATION_COMPILE == 0 /* On real UE, we have a module */
module_init(ipa_ut_module_init);
module_exit(ipa_ut_module_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA Unit Test module");
+#endif
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 1c16e5a..f06d6f3 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -1021,10 +1021,9 @@
if (rc)
pr_err("Error sending command completion event\n");
+ mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
/* Trigger callback to clients */
mhi_dev_trigger_cb();
-
- mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
if (ch_id == MHI_CLIENT_MBIM_OUT)
kobject_uevent_env(&mhi_ctx->dev->kobj,
KOBJ_CHANGE, connected);
@@ -1418,16 +1417,20 @@
union mhi_dev_ring_element_type *el;
int rc = 0;
struct mhi_req *req = (struct mhi_req *)mreq;
- struct mhi_req *local_req = NULL;
union mhi_dev_ring_element_type *compl_ev = NULL;
struct mhi_dev *mhi = NULL;
unsigned long flags;
+ size_t transfer_len;
+ u32 snd_cmpl;
+ uint32_t rd_offset;
client = req->client;
ch = client->channel;
mhi = ch->ring->mhi_dev;
el = req->el;
- local_req = req;
+ transfer_len = req->len;
+ snd_cmpl = req->snd_cmpl;
+ rd_offset = req->rd_offset;
ch->curr_ereq->context = ch;
dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
@@ -1441,14 +1444,13 @@
compl_ev->evt_tr_comp.chid = ch->ch_id;
compl_ev->evt_tr_comp.type =
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
- compl_ev->evt_tr_comp.len = el->tre.len;
+ compl_ev->evt_tr_comp.len = transfer_len;
compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
- local_req->rd_offset * TR_RING_ELEMENT_SZ;
+ rd_offset * TR_RING_ELEMENT_SZ;
ch->curr_ereq->num_events++;
- if (ch->curr_ereq->num_events >= MAX_TR_EVENTS ||
- local_req->snd_cmpl){
+ if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) {
mhi_log(MHI_MSG_VERBOSE,
"num of tr events %d for ch %d\n",
ch->curr_ereq->num_events, ch->ch_id);
@@ -2452,8 +2454,10 @@
* If channel is open during registration, no callback is issued.
* Instead return -EEXIST to notify the client. Clients request
* is added to the list to notify future state change notification.
+ * Channel struct may not be allocated yet if this function is called
+ * early during boot - add an explicit check for non-null "ch".
*/
- if (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED) {
+ if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
mutex_unlock(&mhi_ctx->mhi_lock);
return -EEXIST;
}
@@ -2832,8 +2836,6 @@
INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
- INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
- mutex_init(&mhi_ctx->mhi_lock);
mutex_init(&mhi_ctx->mhi_event_lock);
mutex_init(&mhi_ctx->mhi_write_test);
@@ -2983,6 +2985,14 @@
dev_err(&pdev->dev,
"Failed to create IPC logging context\n");
}
+ /*
+ * The below list and mutex should be initialized
+ * before calling mhi_uci_init to avoid crash in
+ * mhi_register_state_cb when accessing these.
+ */
+ INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
+ mutex_init(&mhi_ctx->mhi_lock);
+
mhi_uci_init();
mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
MHI_STATE_CONFIGURED);
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index b9120fc..2dfa58d 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -712,6 +712,7 @@
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102,
+ MHI_CLIENT_INVALID = 0xFFFFFFFF
};
/* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 is used for internal only */
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index ed02d0d..febc867 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -35,7 +35,7 @@
#define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2)
#define MHI_UCI_IPC_LOG_PAGES (100)
-#define MAX_NR_TRBS_PER_CHAN 1
+#define MAX_NR_TRBS_PER_CHAN 9
#define MHI_QTI_IFACE_ID 4
#define DEVICE_NAME "mhi"
@@ -70,7 +70,131 @@
u32 nr_trbs;
/* direction of the channel, see enum mhi_chan_dir */
enum mhi_chan_dir dir;
- u32 uci_ownership;
+ /* need to register mhi channel state change callback */
+ bool register_cb;
+};
+
+/* UCI channel attributes table */
+static const struct chan_attr uci_chan_attr_table[] = {
+ {
+ MHI_CLIENT_LOOPBACK_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_LOOPBACK_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_SAHARA_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_SAHARA_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_EFS_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_EFS_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_MBIM_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_MBIM_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_QMI_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_QMI_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_IP_CTRL_0_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_IP_CTRL_0_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_IP_CTRL_1_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_IP_CTRL_1_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ {
+ MHI_CLIENT_DUN_OUT,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_OUT,
+ false
+ },
+ {
+ MHI_CLIENT_DUN_IN,
+ TRB_MAX_DATA_SIZE,
+ MAX_NR_TRBS_PER_CHAN,
+ MHI_DIR_IN,
+ false
+ },
+ { /* Must be the last */
+ MHI_CLIENT_INVALID,
+ 0,
+ 0,
+ MHI_DIR_INVALID,
+ false
+ },
};
struct uci_ctrl {
@@ -87,6 +211,8 @@
u32 in_chan;
struct mhi_dev_client *out_handle;
struct mhi_dev_client *in_handle;
+ const struct chan_attr *in_chan_attr;
+ const struct chan_attr *out_chan_attr;
wait_queue_head_t read_wq;
wait_queue_head_t write_wq;
atomic_t read_data_ready;
@@ -104,7 +230,6 @@
};
struct mhi_uci_ctxt_t {
- struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
struct uci_ctrl ctrl_handle;
void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
@@ -119,6 +244,7 @@
};
#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
+#define CLIENT_TO_CHAN(_CLIENT_NR) (_CLIENT_NR * 2)
#define uci_log(_msg_lvl, _msg, ...) do { \
if (_msg_lvl >= mhi_uci_msg_lvl) { \
@@ -156,7 +282,7 @@
{
int rc = 0;
u32 i, j;
- struct chan_attr *chan_attributes;
+ const struct chan_attr *in_chan_attr;
size_t buf_size;
void *data_loc;
@@ -169,10 +295,15 @@
return -EINVAL;
}
- chan_attributes = &uci_ctxt.chan_attrib[chan];
- buf_size = chan_attributes->max_packet_size;
+ in_chan_attr = client_handle->in_chan_attr;
+ if (!in_chan_attr) {
+ uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n",
+ client_handle->in_chan);
+ return -EINVAL;
+ }
+ buf_size = in_chan_attr->max_packet_size;
- for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+ for (i = 0; i < (in_chan_attr->nr_trbs); i++) {
data_loc = kmalloc(buf_size, GFP_KERNEL);
if (!data_loc) {
rc = -ENOMEM;
@@ -397,20 +528,11 @@
struct file *file_handle)
{
struct uci_client *uci_handle = file_handle->private_data;
- struct mhi_uci_ctxt_t *uci_ctxt;
- u32 nr_in_bufs = 0;
int rc = 0;
- int in_chan = 0;
- u32 buf_size = 0;
if (!uci_handle)
return -EINVAL;
- uci_ctxt = uci_handle->uci_ctxt;
- in_chan = iminor(mhi_inode) + 1;
- nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
- buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
-
if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
uci_log(UCI_DBG_DBG,
"Last client left, closing channel 0x%x\n",
@@ -750,54 +872,25 @@
static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
{
- u32 i = 0;
- u32 data_size = TRB_MAX_DATA_SIZE;
- u32 index = 0;
+ u32 i;
+ u32 index;
struct uci_client *client;
- struct chan_attr *chan_attrib = NULL;
+ const struct chan_attr *chan_attrib;
- for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
- chan_attrib = &uci_ctxt->chan_attrib[i];
- switch (i) {
- case MHI_CLIENT_LOOPBACK_OUT:
- case MHI_CLIENT_LOOPBACK_IN:
- case MHI_CLIENT_SAHARA_OUT:
- case MHI_CLIENT_SAHARA_IN:
- case MHI_CLIENT_EFS_OUT:
- case MHI_CLIENT_EFS_IN:
- case MHI_CLIENT_MBIM_OUT:
- case MHI_CLIENT_MBIM_IN:
- case MHI_CLIENT_QMI_OUT:
- case MHI_CLIENT_QMI_IN:
- case MHI_CLIENT_IP_CTRL_0_OUT:
- case MHI_CLIENT_IP_CTRL_0_IN:
- case MHI_CLIENT_IP_CTRL_1_OUT:
- case MHI_CLIENT_IP_CTRL_1_IN:
- case MHI_CLIENT_DUN_OUT:
- case MHI_CLIENT_DUN_IN:
- chan_attrib->uci_ownership = 1;
+ for (i = 0; i < ARRAY_SIZE(uci_chan_attr_table); i += 2) {
+ chan_attrib = &uci_chan_attr_table[i];
+ if (chan_attrib->chan_id == MHI_CLIENT_INVALID)
break;
- default:
- chan_attrib->uci_ownership = 0;
- break;
- }
- if (chan_attrib->uci_ownership) {
- chan_attrib->chan_id = i;
- chan_attrib->max_packet_size = data_size;
- index = CHAN_TO_CLIENT(i);
- client = &uci_ctxt->client_handles[index];
- chan_attrib->nr_trbs = 9;
- client->in_buf_list =
- kmalloc(sizeof(struct mhi_dev_iov) *
- chan_attrib->nr_trbs,
+ index = CHAN_TO_CLIENT(i);
+ client = &uci_ctxt->client_handles[index];
+ client->out_chan_attr = chan_attrib;
+ client->in_chan_attr = ++chan_attrib;
+ client->in_buf_list =
+ kcalloc(chan_attrib->nr_trbs,
+ sizeof(struct mhi_dev_iov),
GFP_KERNEL);
- if (client->in_buf_list == NULL)
- return -ENOMEM;
- }
- if (i % 2 == 0)
- chan_attrib->dir = MHI_DIR_OUT;
- else
- chan_attrib->dir = MHI_DIR_IN;
+ if (!client->in_buf_list)
+ return -ENOMEM;
}
return 0;
}
@@ -949,16 +1042,14 @@
uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
- if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
- mhi_client = &uci_ctxt.client_handles[i];
-
- r = mhi_register_client(mhi_client, i);
-
- if (r) {
- uci_log(UCI_DBG_CRITICAL,
- "Failed to reg client %d ret %d\n",
- r, i);
- }
+ mhi_client = &uci_ctxt.client_handles[i];
+ if (!mhi_client->in_chan_attr)
+ continue;
+ r = mhi_register_client(mhi_client, i);
+ if (r) {
+ uci_log(UCI_DBG_CRITICAL,
+ "Failed to reg client %d ret %d\n",
+ r, i);
}
}
@@ -992,29 +1083,30 @@
uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
- if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
- cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
- uci_ctxt.cdev[i].owner = THIS_MODULE;
- r = cdev_add(&uci_ctxt.cdev[i],
- uci_ctxt.start_ctrl_nr + i, 1);
- if (IS_ERR_VALUE(r)) {
- uci_log(UCI_DBG_ERROR,
- "Failed to add cdev %d, ret 0x%x\n",
- i, r);
- goto failed_char_add;
- }
+ mhi_client = &uci_ctxt.client_handles[i];
+ if (!mhi_client->in_chan_attr)
+ continue;
+ cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
+ uci_ctxt.cdev[i].owner = THIS_MODULE;
+ r = cdev_add(&uci_ctxt.cdev[i],
+ uci_ctxt.start_ctrl_nr + i, 1);
+ if (IS_ERR_VALUE(r)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d, ret 0x%x\n",
+ i, r);
+ goto failed_char_add;
+ }
- uci_ctxt.client_handles[i].dev =
- device_create(uci_ctxt.mhi_uci_class, NULL,
- uci_ctxt.start_ctrl_nr + i,
- NULL, DEVICE_NAME "_pipe_%d",
- i * 2);
- if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
- uci_log(UCI_DBG_ERROR,
- "Failed to add cdev %d\n", i);
- cdev_del(&uci_ctxt.cdev[i]);
- goto failed_device_create;
- }
+ uci_ctxt.client_handles[i].dev =
+ device_create(uci_ctxt.mhi_uci_class, NULL,
+ uci_ctxt.start_ctrl_nr + i,
+ NULL, DEVICE_NAME "_pipe_%d",
+ i * 2);
+ if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d\n", i);
+ cdev_del(&uci_ctxt.cdev[i]);
+ goto failed_device_create;
}
}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3dd1722..3ab93fe 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -331,6 +331,10 @@
POWER_SUPPLY_ATTR(allow_hvdcp3),
POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
POWER_SUPPLY_ATTR(max_pulse_allowed),
+ POWER_SUPPLY_ATTR(ignore_false_negative_isense),
+ POWER_SUPPLY_ATTR(battery_info),
+ POWER_SUPPLY_ATTR(battery_info_id),
+ POWER_SUPPLY_ATTR(enable_jeita_detection),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 275b982..e8d91ae 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -78,6 +78,7 @@
struct class qcom_batt_class;
struct wakeup_source *pl_ws;
struct notifier_block nb;
+ bool pl_disable;
};
struct pl_data *the_chip;
@@ -88,6 +89,7 @@
enum {
AICL_RERUN_WA_BIT = BIT(0),
+ FORCE_INOV_DISABLE_BIT = BIT(1),
};
static int debug_mask;
@@ -848,6 +850,12 @@
chip->pl_settled_ua = 0;
}
+ /* notify parallel state change */
+ if (chip->pl_psy && (chip->pl_disable != pl_disable)) {
+ power_supply_changed(chip->pl_psy);
+ chip->pl_disable = (bool)pl_disable;
+ }
+
pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
pl_disable ? "disabled" : "enabled");
@@ -916,7 +924,8 @@
chip->pl_mode = pval.intval;
/* Disable autonomous votage increments for USBIN-USBIN */
- if (IS_USBIN(chip->pl_mode)) {
+ if (IS_USBIN(chip->pl_mode)
+ && (chip->wa_flags & FORCE_INOV_DISABLE_BIT)) {
if (!chip->hvdcp_hw_inov_dis_votable)
chip->hvdcp_hw_inov_dis_votable =
find_votable("HVDCP_HW_INOV_DIS");
@@ -1041,7 +1050,6 @@
else
vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
- rerun_election(chip->fcc_votable);
if (IS_USBIN(chip->pl_mode)) {
/*
@@ -1200,7 +1208,9 @@
switch (smb_version) {
case PMI8998_SUBTYPE:
case PM660_SUBTYPE:
- chip->wa_flags = AICL_RERUN_WA_BIT;
+ chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
+ break;
+ case PMI632_SUBTYPE:
break;
default:
break;
@@ -1301,6 +1311,7 @@
goto unreg_notifier;
}
+ chip->pl_disable = true;
chip->qcom_batt_class.name = "qcom-battery",
chip->qcom_batt_class.owner = THIS_MODULE,
chip->qcom_batt_class.class_attrs = pl_attributes;
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 9b9a880..129af7b 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -19,6 +19,7 @@
#include "fg-alg.h"
#define FULL_SOC_RAW 255
+#define FULL_BATT_SOC GENMASK(31, 0)
#define CAPACITY_DELTA_DECIPCT 500
/* Cycle counter APIs */
@@ -351,7 +352,7 @@
*/
static int cap_learning_process_full_data(struct cap_learning *cl)
{
- int rc, cc_soc_sw, cc_soc_delta_pct;
+ int rc, cc_soc_sw, cc_soc_delta_centi_pct;
int64_t delta_cap_uah;
rc = cl->get_cc_soc(cl->data, &cc_soc_sw);
@@ -360,20 +361,21 @@
return rc;
}
- cc_soc_delta_pct =
- div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 100,
+ cc_soc_delta_centi_pct =
+ div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 10000,
cl->cc_soc_max);
/* If the delta is < 50%, then skip processing full data */
- if (cc_soc_delta_pct < 50) {
- pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+ if (cc_soc_delta_centi_pct < 5000) {
+ pr_err("cc_soc_delta_centi_pct: %d\n", cc_soc_delta_centi_pct);
return -ERANGE;
}
- delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_pct, 100);
+ delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_centi_pct,
+ 10000);
cl->final_cap_uah = cl->init_cap_uah + delta_cap_uah;
- pr_debug("Current cc_soc=%d cc_soc_delta_pct=%d total_cap_uah=%lld\n",
- cc_soc_sw, cc_soc_delta_pct, cl->final_cap_uah);
+ pr_debug("Current cc_soc=%d cc_soc_delta_centi_pct=%d total_cap_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_centi_pct, cl->final_cap_uah);
return 0;
}
@@ -401,8 +403,8 @@
return -EINVAL;
}
- cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_msb,
- FULL_SOC_RAW);
+ cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc,
+ FULL_BATT_SOC);
if (cl->prime_cc_soc) {
/*
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 015da41..deccb20 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -33,6 +33,7 @@
#include <linux/ktime.h>
#include <linux/power_supply.h>
#include <linux/of_batterydata.h>
+#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/alarmtimer.h>
#include <linux/qpnp/qpnp-revid.h>
@@ -72,6 +73,7 @@
#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
#define MEM_IF_TIMEOUT_MS 5000
+#define FG_CYCLE_MS 1500
#define BUCKET_COUNT 8
#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
@@ -108,6 +110,7 @@
PMI8950 = 17,
PMI8996 = 19,
PMI8937 = 55,
+ PMI8940 = 64,
};
enum wa_flags {
@@ -150,6 +153,8 @@
int min_temp;
int max_temp;
int vbat_est_thr_uv;
+ int max_cap_limit;
+ int min_cap_limit;
};
struct fg_rslow_data {
@@ -275,11 +280,45 @@
DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
};
+enum fg_mem_backup_index {
+ FG_BACKUP_SOC = 0,
+ FG_BACKUP_CYCLE_COUNT,
+ FG_BACKUP_CC_SOC_COEFF,
+ FG_BACKUP_IGAIN,
+ FG_BACKUP_VCOR,
+ FG_BACKUP_TEMP_COUNTER,
+ FG_BACKUP_AGING_STORAGE,
+ FG_BACKUP_MAH_TO_SOC,
+ FG_BACKUP_MAX,
+};
+
+#define BACKUP(_idx, _address, _offset, _length, _value) \
+ [FG_BACKUP_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = {
+ /* ID Address, Offset, Length, Value*/
+ BACKUP(SOC, 0x564, 0, 24, -EINVAL),
+ BACKUP(CYCLE_COUNT, 0x5E8, 0, 16, -EINVAL),
+ BACKUP(CC_SOC_COEFF, 0x5BC, 0, 8, -EINVAL),
+ BACKUP(IGAIN, 0x424, 0, 4, -EINVAL),
+ BACKUP(VCOR, 0x484, 0, 4, -EINVAL),
+ BACKUP(TEMP_COUNTER, 0x580, 0, 4, -EINVAL),
+ BACKUP(AGING_STORAGE, 0x5E4, 0, 4, -EINVAL),
+ BACKUP(MAH_TO_SOC, 0x4A0, 0, 4, -EINVAL),
+};
+
static int fg_debug_mask;
module_param_named(
debug_mask, fg_debug_mask, int, 00600
);
+static int fg_reset_on_lockup;
+
static int fg_sense_type = -EINVAL;
static int fg_restart;
@@ -298,9 +337,18 @@
sram_update_period_ms, fg_sram_update_period_ms, int, 00600
);
+static bool fg_batt_valid_ocv;
+module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, 0600
+);
+
+static int fg_batt_range_pct;
+module_param_named(batt_range_pct, fg_batt_range_pct, int, 0600
+);
+
struct fg_irq {
int irq;
- unsigned long disabled;
+ bool disabled;
+ bool wakeup;
};
enum fg_soc_irq {
@@ -348,6 +396,16 @@
MAX_ADDRESS,
};
+enum batt_info_params {
+ BATT_INFO_NOTIFY = 0,
+ BATT_INFO_SOC,
+ BATT_INFO_RES_ID,
+ BATT_INFO_VOLTAGE,
+ BATT_INFO_TEMP,
+ BATT_INFO_FCC,
+ BATT_INFO_MAX,
+};
+
struct register_offset {
u16 address[MAX_ADDRESS];
};
@@ -395,6 +453,22 @@
}
}
+enum slope_limit_status {
+ LOW_TEMP_CHARGE,
+ HIGH_TEMP_CHARGE,
+ LOW_TEMP_DISCHARGE,
+ HIGH_TEMP_DISCHARGE,
+ SLOPE_LIMIT_MAX,
+};
+
+#define VOLT_GAIN_MAX 3
+struct dischg_gain_soc {
+ bool enable;
+ u32 soc[VOLT_GAIN_MAX];
+ u32 medc_gain[VOLT_GAIN_MAX];
+ u32 highc_gain[VOLT_GAIN_MAX];
+};
+
#define THERMAL_COEFF_N_BYTES 6
struct fg_chip {
struct device *dev;
@@ -420,6 +494,7 @@
struct completion first_soc_done;
struct power_supply *bms_psy;
struct power_supply_desc bms_psy_d;
+ spinlock_t sec_access_lock;
struct mutex rw_lock;
struct mutex sysfs_restart_lock;
struct delayed_work batt_profile_init;
@@ -449,6 +524,7 @@
struct fg_wakeup_source update_sram_wakeup_source;
bool fg_restarting;
bool profile_loaded;
+ bool soc_reporting_ready;
bool use_otp_profile;
bool battery_missing;
bool power_supply_registered;
@@ -459,6 +535,7 @@
bool charge_done;
bool resume_soc_lowered;
bool vbat_low_irq_enabled;
+ bool full_soc_irq_enabled;
bool charge_full;
bool hold_soc_while_full;
bool input_present;
@@ -467,6 +544,10 @@
bool bad_batt_detection_en;
bool bcl_lpm_disabled;
bool charging_disabled;
+ bool use_vbat_low_empty_soc;
+ bool fg_shutdown;
+ bool use_soft_jeita_irq;
+ bool allow_false_negative_isense;
struct delayed_work update_jeita_setting;
struct delayed_work update_sram_data;
struct delayed_work update_temp_work;
@@ -491,6 +572,7 @@
int prev_status;
int health;
enum fg_batt_aging_mode batt_aging_mode;
+ struct alarm hard_jeita_alarm;
/* capacity learning */
struct fg_learning_data learning_data;
struct alarm fg_cap_learning_alarm;
@@ -498,6 +580,7 @@
struct fg_cc_soc_data sw_cc_soc_data;
/* rslow compensation */
struct fg_rslow_data rslow_comp;
+ int rconn_mohm;
/* cycle counter */
struct fg_cyc_ctr_data cyc_ctr;
/* iadc compensation */
@@ -510,6 +593,8 @@
bool jeita_hysteresis_support;
bool batt_hot;
bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
int cold_hysteresis;
int hot_hysteresis;
/* ESR pulse tuning */
@@ -518,6 +603,47 @@
bool esr_extract_disabled;
bool imptr_pulse_slow_en;
bool esr_pulse_tune_en;
+ /* Slope limiter */
+ struct work_struct slope_limiter_work;
+ struct fg_wakeup_source slope_limit_wakeup_source;
+ bool soc_slope_limiter_en;
+ enum slope_limit_status slope_limit_sts;
+ u32 slope_limit_temp;
+ u32 slope_limit_coeffs[SLOPE_LIMIT_MAX];
+ /* Discharge soc gain */
+ struct work_struct dischg_gain_work;
+ struct fg_wakeup_source dischg_gain_wakeup_source;
+ struct dischg_gain_soc dischg_gain;
+ /* IMA error recovery */
+ struct completion fg_reset_done;
+ struct work_struct ima_error_recovery_work;
+ struct fg_wakeup_source fg_reset_wakeup_source;
+ struct mutex ima_recovery_lock;
+ bool ima_error_handling;
+ bool block_sram_access;
+ bool irqs_enabled;
+ bool use_last_soc;
+ int last_soc;
+ /* Validating temperature */
+ int last_good_temp;
+ int batt_temp_low_limit;
+ int batt_temp_high_limit;
+ /* Validating CC_SOC */
+ struct work_struct cc_soc_store_work;
+ struct fg_wakeup_source cc_soc_wakeup_source;
+ int cc_soc_limit_pct;
+ bool use_last_cc_soc;
+ int64_t last_cc_soc;
+ /* Sanity check */
+ struct delayed_work check_sanity_work;
+ struct fg_wakeup_source sanity_wakeup_source;
+ u8 last_beat_count;
+ /* Batt_info restore */
+ int batt_info[BATT_INFO_MAX];
+ int batt_info_id;
+ bool batt_info_restore;
+ bool *batt_range_ocv;
+ int *batt_range_pct;
};
/* FG_MEMIF DEBUGFS structures */
@@ -661,17 +787,56 @@
return rc;
}
-static int fg_masked_write(struct fg_chip *chip, u16 addr,
+static int fg_masked_write_raw(struct fg_chip *chip, u16 addr,
u8 mask, u8 val, int len)
{
int rc;
rc = regmap_update_bits(chip->regmap, addr, mask, val);
- if (rc) {
+ if (rc)
pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
- return rc;
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = fg_masked_write_raw(chip, addr, mask, val, len);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
+static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val,
+ int len)
+{
+ int rc;
+ unsigned long flags;
+ u8 temp;
+ u16 base = addr & (~PERIPHERAL_MASK);
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ temp = SEC_ACCESS_VALUE;
+ rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1);
+ if (rc) {
+ pr_err("Unable to unlock sec_access: %d\n", rc);
+ goto out;
}
+ rc = fg_masked_write_raw(chip, addr, mask, val, len);
+ if (rc)
+ pr_err("Unable to write securely to address 0x%x: %d", addr,
+ rc);
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
return rc;
}
@@ -952,6 +1117,7 @@
int rc = 0, user_cnt = 0, sublen;
bool access_configured = false;
u8 *wr_data = val, word[4];
+ u16 orig_address = address;
char str[DEBUG_PRINT_BUFFER_SIZE];
if (address < RAM_OFFSET)
@@ -960,8 +1126,8 @@
if (offset > 3)
return -EINVAL;
- address = ((address + offset) / 4) * 4;
- offset = (address + offset) % 4;
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
@@ -1061,50 +1227,253 @@
#define MEM_INTF_IMA_EXP_STS 0x55
#define MEM_INTF_IMA_HW_STS 0x56
#define MEM_INTF_IMA_BYTE_EN 0x60
-#define IMA_ADDR_STBL_ERR BIT(7)
-#define IMA_WR_ACS_ERR BIT(6)
-#define IMA_RD_ACS_ERR BIT(5)
#define IMA_IACS_CLR BIT(2)
#define IMA_IACS_RDY BIT(1)
-static int fg_check_ima_exception(struct fg_chip *chip)
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 temp;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Running IACS clear sequence\n");
+
+ /* clear the error */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ if (rc) {
+ pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = 0x4;
+ rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ if (rc) {
+ pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = 0x0;
+ rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Error writing to WR_DATA3, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Error writing to RD_DATA3, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (rc) {
+ pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IACS clear sequence complete!\n");
+ return rc;
+}
+
+#define IACS_ERR_BIT BIT(0)
+#define XCT_ERR_BIT BIT(1)
+#define DATA_RD_ERR_BIT BIT(3)
+#define DATA_WR_ERR_BIT BIT(4)
+#define ADDR_BURST_WRAP_BIT BIT(5)
+#define ADDR_RNG_ERR_BIT BIT(6)
+#define ADDR_SRC_ERR_BIT BIT(7)
+static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts)
{
int rc = 0, ret = 0;
- u8 err_sts, exp_sts = 0, hw_sts = 0;
+ u8 err_sts = 0, exp_sts = 0, hw_sts = 0;
+ bool run_err_clr_seq = false;
rc = fg_read(chip, &err_sts,
chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
if (rc) {
- pr_err("failed to read beat count rc=%d\n", rc);
+ pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc);
return rc;
}
- if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
- u8 temp;
-
- fg_read(chip, &exp_sts,
+ rc = fg_read(chip, &exp_sts,
chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
- fg_read(chip, &hw_sts,
- chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
- pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
- err_sts, exp_sts, hw_sts);
- rc = err_sts;
-
- /* clear the error */
- ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
- IMA_IACS_CLR, IMA_IACS_CLR, 1);
- temp = 0x4;
- ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
- temp = 0x0;
- ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
- ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
- ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
- IMA_IACS_CLR, 0, 1);
- if (!ret)
- return -EAGAIN;
-
- pr_err("Error clearing IMA exception ret=%d\n", ret);
+ if (rc) {
+ pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc);
+ return rc;
}
+ rc = fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ if (rc) {
+ pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+
+ if (check_hw_sts) {
+ /*
+ * Lower nibble should be equal to upper nibble before SRAM
+ * transactions begin from SW side. If they are unequal, then
+ * the error clear sequence should be run irrespective of IMA
+ * exception errors.
+ */
+ if ((hw_sts & 0x0F) != hw_sts >> 4) {
+ pr_err("IMA HW not in correct state, hw_sts=%x\n",
+ hw_sts);
+ run_err_clr_seq = true;
+ }
+ }
+
+ if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT |
+ DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT |
+ ADDR_SRC_ERR_BIT)) {
+ pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+ run_err_clr_seq = true;
+ }
+
+ if (run_err_clr_seq) {
+ ret = fg_run_iacs_clear_sequence(chip);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static void fg_enable_irqs(struct fg_chip *chip, bool enable)
+{
+ if (!(enable ^ chip->irqs_enabled))
+ return;
+
+ if (enable) {
+ enable_irq(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ if (!chip->full_soc_irq_enabled) {
+ enable_irq(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+ }
+ enable_irq(chip->batt_irq[BATT_MISSING].irq);
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!chip->use_vbat_low_empty_soc) {
+ enable_irq(chip->soc_irq[EMPTY_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ }
+ chip->irqs_enabled = true;
+ } else {
+ disable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq);
+ if (chip->full_soc_irq_enabled) {
+ disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = false;
+ }
+ disable_irq(chip->batt_irq[BATT_MISSING].irq);
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ if (!chip->use_vbat_low_empty_soc) {
+ disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq);
+ }
+ chip->irqs_enabled = false;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("FG interrupts are %sabled\n", enable ? "en" : "dis");
+}
+
+static void fg_check_ima_error_handling(struct fg_chip *chip)
+{
+ if (chip->ima_error_handling) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IMA error is handled already!\n");
+ return;
+ }
+ mutex_lock(&chip->ima_recovery_lock);
+ fg_enable_irqs(chip, false);
+ chip->use_last_cc_soc = true;
+ chip->ima_error_handling = true;
+ if (!work_pending(&chip->ima_error_recovery_work))
+ schedule_work(&chip->ima_error_recovery_work);
+ mutex_unlock(&chip->ima_recovery_lock);
+}
+
+#define SOC_ALG_ST 0xCF
+#define FGXCT_PRD BIT(7)
+#define ALG_ST_CHECK_COUNT 20
+static int fg_check_alg_status(struct fg_chip *chip)
+{
+ int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0;
+ u8 ima_opr_sts, alg_sts = 0, temp = 0;
+
+ if (!fg_reset_on_lockup) {
+ pr_info("FG lockup detection cannot be run\n");
+ return 0;
+ }
+
+ rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1);
+ if (rc) {
+ pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc);
+ return rc;
+ }
+
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && !(ima_opr_sts & FGXCT_PRD))
+ break;
+
+ if (rc) {
+ pr_err("Error in reading IMA_OPR_STS, rc=%d\n",
+ rc);
+ break;
+ }
+
+ rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST,
+ 1);
+ if (rc) {
+ pr_err("Error in reading SOC_ALG_ST, rc=%d\n",
+ rc);
+ break;
+ }
+
+ if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts))
+ count++;
+
+ /* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */
+ usleep_range(9000, 11000);
+
+ if (!(--timeout))
+ break;
+ }
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("ima_opr_sts: %x alg_sts: %x count=%d\n", ima_opr_sts,
+ alg_sts, count);
+
+ if (count == ALG_ST_CHECK_COUNT) {
+ /* If we are here, that means FG ALG is stuck */
+ pr_err("ALG is stuck\n");
+ fg_check_ima_error_handling(chip);
+ rc = -EBUSY;
+ }
return rc;
}
@@ -1122,19 +1491,25 @@
while (1) {
rc = fg_read(chip, &ima_opr_sts,
chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
- if (!rc && (ima_opr_sts & IMA_IACS_RDY))
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
- if (!(--timeout) || rc)
- break;
- /* delay for iacs_ready to be asserted */
- usleep_range(5000, 7000);
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
}
if (!timeout || rc) {
- pr_err("IACS_RDY not set\n");
+ pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts);
+ rc = fg_check_alg_status(chip);
+ if (rc && rc != -EBUSY)
+ pr_err("Couldn't check FG ALG status, rc=%d\n",
+ rc);
/* perform IACS_CLR sequence */
- fg_check_ima_exception(chip);
+ fg_check_ima_exception(chip, false);
return -EBUSY;
}
@@ -1154,15 +1529,16 @@
while (len > 0) {
num_bytes = (offset + len) > BUF_LEN ?
- (BUF_LEN - offset) : len;
+ (BUF_LEN - offset) : len;
/* write to byte_enable */
for (i = offset; i < (offset + num_bytes); i++)
byte_enable |= BIT(i);
rc = fg_write(chip, &byte_enable,
- chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
if (rc) {
- pr_err("Unable to write to byte_en_reg rc=%d\n", rc);
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
return rc;
}
/* write data */
@@ -1193,12 +1569,13 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n",
+ address, offset, rc);
return rc;
}
/* check for error condition */
- rc = fg_check_ima_exception(chip);
+ rc = fg_check_ima_exception(chip, false);
if (rc) {
pr_err("IMA transaction failed rc=%d", rc);
return rc;
@@ -1239,12 +1616,13 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n",
+ address, offset, rc);
return rc;
}
/* check for error condition */
- rc = fg_check_ima_exception(chip);
+ rc = fg_check_ima_exception(chip, false);
if (rc) {
pr_err("IMA transaction failed rc=%d", rc);
return rc;
@@ -1296,7 +1674,7 @@
* clear, then return an error instead of waiting for it again.
*/
if (time_count > 4) {
- pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n");
+ pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
return -ETIMEDOUT;
}
@@ -1322,7 +1700,8 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n",
+ address, offset, rc);
return rc;
}
@@ -1335,7 +1714,8 @@
rc = fg_check_iacs_ready(chip);
if (rc)
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n",
+ address, offset, rc);
return rc;
}
@@ -1346,10 +1726,13 @@
static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset)
{
- int rc = 0, orig_address = address;
+ int rc = 0, ret, orig_address = address;
u8 start_beat_count, end_beat_count, count = 0;
bool retry = false;
+ if (chip->fg_shutdown)
+ return -EINVAL;
+
if (offset > 3) {
pr_err("offset too large %d\n", offset);
return -EINVAL;
@@ -1372,11 +1755,22 @@
}
mutex_lock(&chip->rw_lock);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n",
+ len, address, offset);
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Retried reading 3 times\n");
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
if (rc) {
pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ retry = true;
+ count++;
goto out;
}
@@ -1385,18 +1779,21 @@
chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
if (rc) {
pr_err("failed to read beat count rc=%d\n", rc);
+ retry = true;
+ count++;
goto out;
}
/* read data */
rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
if (rc) {
+ count++;
if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
} else {
pr_err("failed to read SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
}
@@ -1406,6 +1803,8 @@
chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
if (rc) {
pr_err("failed to read beat count rc=%d\n", rc);
+ retry = true;
+ count++;
goto out;
}
@@ -1418,12 +1817,13 @@
if (fg_debug_mask & FG_MEM_DEBUG_READS)
pr_info("Beat count do not match - retry transaction\n");
retry = true;
+ count++;
}
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
- if (rc)
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
+ ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (ret)
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
if (retry) {
retry = false;
@@ -1439,8 +1839,12 @@
static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset)
{
- int rc = 0, orig_address = address;
+ int rc = 0, ret, orig_address = address;
u8 count = 0;
+ bool retry = false;
+
+ if (chip->fg_shutdown)
+ return -EINVAL;
if (address < RAM_OFFSET)
return -EINVAL;
@@ -1455,32 +1859,49 @@
offset = (orig_address + offset) % 4;
mutex_lock(&chip->rw_lock);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n",
+ len, address, offset);
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Retried writing 3 times\n");
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
if (rc) {
- pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc);
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ retry = true;
+ count++;
goto out;
}
/* write data */
rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
if (rc) {
+ count++;
if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
} else {
pr_err("failed to write SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
}
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
- if (rc)
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
+ ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (ret)
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
mutex_unlock(&chip->rw_lock);
fg_relax(&chip->memif_wakeup_source);
@@ -1490,6 +1911,9 @@
static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset, bool keep_access)
{
+ if (chip->block_sram_access)
+ return -EBUSY;
+
if (chip->ima_supported)
return fg_interleaved_mem_read(chip, val, address,
len, offset);
@@ -1501,6 +1925,9 @@
static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset, bool keep_access)
{
+ if (chip->block_sram_access)
+ return -EBUSY;
+
if (chip->ima_supported)
return fg_interleaved_mem_write(chip, val, address,
len, offset);
@@ -1538,6 +1965,62 @@
return rc;
}
+static u8 sram_backup_buffer[100];
+static int fg_backup_sram_registers(struct fg_chip *chip, bool save)
+{
+ int rc, i, len, offset;
+ u16 address;
+ u8 *ptr;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("%sing SRAM registers\n", save ? "Back" : "Restor");
+
+ ptr = sram_backup_buffer;
+ for (i = 0; i < FG_BACKUP_MAX; i++) {
+ address = fg_backup_regs[i].address;
+ offset = fg_backup_regs[i].offset;
+ len = fg_backup_regs[i].len;
+ if (save)
+ rc = fg_interleaved_mem_read(chip, ptr, address,
+ len, offset);
+ else
+ rc = fg_interleaved_mem_write(chip, ptr, address,
+ len, offset);
+ if (rc) {
+ pr_err("Error in reading %d bytes from %x[%d], rc=%d\n",
+ len, address, offset, rc);
+ break;
+ }
+ ptr += len;
+ }
+
+ return rc;
+}
+
+#define SOC_FG_RESET 0xF3
+#define RESET_MASK (BIT(7) | BIT(5))
+static int fg_reset(struct fg_chip *chip, bool reset)
+{
+ int rc;
+
+ rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET,
+ 0xFF, reset ? RESET_MASK : 0, 1);
+ if (rc)
+ pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc);
+
+ return rc;
+}
+
+static void fg_handle_battery_insertion(struct fg_chip *chip)
+{
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->fg_reset_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0));
+}
+
+
static int soc_to_setpoint(int soc)
{
return DIV_ROUND_CLOSEST(soc * 255, 100);
@@ -1550,6 +2033,7 @@
val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
data[0] = val & 0xFF;
data[1] = val >> 8;
+ return;
}
static u8 batt_to_setpoint_8b(int vbatt_mv)
@@ -1678,14 +2162,37 @@
return rc;
}
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+
+ return rc;
+}
+
#define SOC_EMPTY BIT(3)
static bool fg_is_batt_empty(struct fg_chip *chip)
{
u8 fg_soc_sts;
int rc;
+ bool vbatt_low_sts;
- rc = fg_read(chip, &fg_soc_sts,
- INT_RT_STS(chip->soc_base), 1);
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ return false;
+
+ return vbatt_low_sts;
+ }
+
+ rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
INT_RT_STS(chip->soc_base), rc);
@@ -1732,7 +2239,16 @@
#define FULL_SOC_RAW 0xFF
static int get_prop_capacity(struct fg_chip *chip)
{
- int msoc;
+ int msoc, rc;
+ bool vbatt_low_sts;
+
+ if (chip->use_last_soc && chip->last_soc) {
+ if (chip->last_soc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
+ return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+ (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+ }
if (chip->battery_missing)
return MISSING_CAPACITY;
@@ -1747,10 +2263,28 @@
return EMPTY_CAPACITY;
}
msoc = get_monotonic_soc_raw(chip);
- if (msoc == 0)
- return EMPTY_CAPACITY;
- else if (msoc == FULL_SOC_RAW)
+ if (msoc == 0) {
+ if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("Error in reading vbatt_status, rc=%d\n",
+ rc);
+ return EMPTY_CAPACITY;
+ }
+
+ if (!vbatt_low_sts)
+ return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+ (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+ else
+ return EMPTY_CAPACITY;
+ } else {
+ return EMPTY_CAPACITY;
+ }
+ } else if (msoc == FULL_SOC_RAW) {
return FULL_CAPACITY;
+ }
+
return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
FULL_SOC_RAW - 2) + 1;
}
@@ -1843,6 +2377,25 @@
return 0;
}
+#define IGNORE_FALSE_NEGATIVE_ISENSE_BIT BIT(3)
+static int set_prop_ignore_false_negative_isense(struct fg_chip *chip,
+ bool ignore)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ IGNORE_FALSE_NEGATIVE_ISENSE_BIT,
+ ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to %s isense false negative ignore rc=%d\n",
+ ignore ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
#define EXPONENT_MASK 0xF800
#define MANTISSA_MASK 0x3FF
#define SIGN BIT(10)
@@ -1953,8 +2506,7 @@
return rc;
}
- if (fg_debug_mask & FG_IRQS)
- pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+ pr_debug("fg batt sts 0x%x\n", fg_batt_sts);
return (fg_batt_sts & BATT_IDED) ? 1 : 0;
}
@@ -1984,7 +2536,7 @@
#define DECIKELVIN 2730
#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
#define FULL_PERCENT_28BIT 0xFFFFFFF
-static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+static int update_sram_data(struct fg_chip *chip, int *resched_ms)
{
int i, j, rc = 0;
u8 reg[4];
@@ -2060,6 +2612,31 @@
}
fg_mem_release(chip);
+ /* Backup the registers whenever no error happens during update */
+ if (fg_reset_on_lockup && !chip->ima_error_handling) {
+ if (!rc) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("backing up SRAM registers\n");
+ rc = fg_backup_sram_registers(chip, true);
+ if (rc) {
+ pr_err("Couldn't save sram registers\n");
+ goto out;
+ }
+ if (!chip->use_last_soc) {
+ chip->last_soc = get_monotonic_soc_raw(chip);
+ chip->last_cc_soc = div64_s64(
+ (int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d last_cc_soc: %lld\n",
+ chip->last_soc, chip->last_cc_soc);
+ } else {
+ pr_err("update_sram failed\n");
+ goto out;
+ }
+ }
+
if (!rc)
get_current_time(&chip->last_sram_update_time);
@@ -2070,7 +2647,55 @@
} else {
*resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
}
+out:
fg_relax(&chip->update_sram_wakeup_source);
+ return rc;
+}
+
+#define SANITY_CHECK_PERIOD_MS 5000
+static void check_sanity_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_sanity_work.work);
+ int rc = 0;
+ u8 beat_count;
+ bool tried_once = false;
+
+ fg_stay_awake(&chip->sanity_wakeup_source);
+
+try_again:
+ rc = fg_read(chip, &beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto resched;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current: %d, prev: %d\n", beat_count,
+ chip->last_beat_count);
+
+ if (chip->last_beat_count == beat_count) {
+ if (!tried_once) {
+ /* Wait for 1 FG cycle and read it once again */
+ msleep(1500);
+ tried_once = true;
+ goto try_again;
+ } else {
+ pr_err("Beat count not updating\n");
+ fg_check_ima_error_handling(chip);
+ goto out;
+ }
+ } else {
+ chip->last_beat_count = beat_count;
+ }
+resched:
+ schedule_delayed_work(
+ &chip->check_sanity_work,
+ msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
+out:
+ fg_relax(&chip->sanity_wakeup_source);
}
#define SRAM_TIMEOUT_MS 3000
@@ -2079,8 +2704,9 @@
struct fg_chip *chip = container_of(work,
struct fg_chip,
update_sram_data.work);
- int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+ int resched_ms, ret;
bool tried_again = false;
+ int rc = 0;
wait:
/* Wait for MEMIF access revoked */
@@ -2094,14 +2720,19 @@
goto wait;
} else if (ret <= 0) {
pr_err("transaction timed out ret=%d\n", ret);
+ if (fg_is_batt_id_valid(chip))
+ resched_ms = fg_sram_update_period_ms;
+ else
+ resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
goto out;
}
- update_sram_data(chip, &resched_ms);
+ rc = update_sram_data(chip, &resched_ms);
out:
- schedule_delayed_work(
- &chip->update_sram_data,
- msecs_to_jiffies(resched_ms));
+ if (!rc)
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
}
#define BATT_TEMP_OFFSET 3
@@ -2115,6 +2746,8 @@
TEMP_SENSE_CHARGE_BIT)
#define TEMP_PERIOD_UPDATE_MS 10000
#define TEMP_PERIOD_TIMEOUT_MS 3000
+#define BATT_TEMP_LOW_LIMIT -600
+#define BATT_TEMP_HIGH_LIMIT 1500
static void update_temp_data(struct work_struct *work)
{
s16 temp;
@@ -2166,14 +2799,44 @@
}
temp = reg[0] | (reg[1] << 8);
- fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
- - DECIKELVIN;
+ temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN;
+
+ /*
+ * If temperature is within the specified range (e.g. -60C and 150C),
+ * update it to the userspace. Otherwise, use the last read good
+ * temperature.
+ */
+ if (temp > chip->batt_temp_low_limit &&
+ temp < chip->batt_temp_high_limit) {
+ chip->last_good_temp = temp;
+ fg_data[0].value = temp;
+ } else {
+ fg_data[0].value = chip->last_good_temp;
+
+ /*
+ * If the temperature is read before and seems to be in valid
+ * range, then a bad temperature reading could be because of
+ * FG lockup. Trigger the FG reset sequence in such cases.
+ */
+ if (chip->last_temp_update_time && fg_reset_on_lockup &&
+ (chip->last_good_temp > chip->batt_temp_low_limit &&
+ chip->last_good_temp < chip->batt_temp_high_limit)) {
+ pr_err("Batt_temp is %d !, triggering FG reset\n",
+ temp);
+ fg_check_ima_error_handling(chip);
+ }
+ }
if (fg_debug_mask & FG_MEM_DEBUG_READS)
pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
get_current_time(&chip->last_temp_update_time);
+ if (chip->soc_slope_limiter_en) {
+ fg_stay_awake(&chip->slope_limit_wakeup_source);
+ schedule_work(&chip->slope_limiter_work);
+ }
+
out:
if (chip->sw_rbias_ctrl) {
rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
@@ -2226,18 +2889,6 @@
return rc;
}
-#define VBATT_LOW_STS_BIT BIT(2)
-static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
-{
- int rc = 0;
- u8 fg_batt_sts;
-
- rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
- if (!rc)
- *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
- return rc;
-}
-
#define BATT_CYCLE_NUMBER_REG 0x5E8
#define BATT_CYCLE_OFFSET 0
static void restore_cycle_counter(struct fg_chip *chip)
@@ -2301,6 +2952,9 @@
bucket, rc);
else
chip->cyc_ctr.count[bucket] = cyc_count;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count);
return rc;
}
@@ -2416,6 +3070,62 @@
return ((int)val) * 1000;
}
+#define SLOPE_LIMITER_COEFF_REG 0x430
+#define SLOPE_LIMITER_COEFF_OFFSET 3
+#define SLOPE_LIMIT_TEMP_THRESHOLD 100
+#define SLOPE_LIMIT_LOW_TEMP_CHG 45
+#define SLOPE_LIMIT_HIGH_TEMP_CHG 2
+#define SLOPE_LIMIT_LOW_TEMP_DISCHG 45
+#define SLOPE_LIMIT_HIGH_TEMP_DISCHG 2
+static void slope_limiter_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ slope_limiter_work);
+ enum slope_limit_status status;
+ int batt_temp, rc;
+ u8 buf[2];
+ int64_t val;
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING ||
+ chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (batt_temp < chip->slope_limit_temp)
+ status = LOW_TEMP_CHARGE;
+ else
+ status = HIGH_TEMP_CHARGE;
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (batt_temp < chip->slope_limit_temp)
+ status = LOW_TEMP_DISCHARGE;
+ else
+ status = HIGH_TEMP_DISCHARGE;
+ } else {
+ goto out;
+ }
+
+ if (status == chip->slope_limit_sts)
+ goto out;
+
+ val = chip->slope_limit_coeffs[status];
+ val *= MICRO_UNIT;
+ half_float_to_buffer(val, buf);
+ rc = fg_mem_write(chip, buf,
+ SLOPE_LIMITER_COEFF_REG, 2,
+ SLOPE_LIMITER_COEFF_OFFSET, 0);
+ if (rc) {
+ pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ chip->slope_limit_sts = status;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n",
+ status, val, buf[0], buf[1]);
+out:
+ fg_relax(&chip->slope_limit_wakeup_source);
+}
+
static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
{
int64_t *coeffs;
@@ -2481,6 +3191,7 @@
#define ESR_ACTUAL_REG 0x554
#define BATTERY_ESR_REG 0x4F4
#define TEMP_RS_TO_RSLOW_REG 0x514
+#define ESR_OFFSET 2
static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
{
int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
@@ -2519,7 +3230,7 @@
rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
esr_actual = half_float(buffer);
- rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
battery_esr = half_float(buffer);
if (rc) {
@@ -2594,124 +3305,6 @@
estimate_battery_age(chip, &chip->actual_cap_uah);
}
-static enum power_supply_property fg_power_props[] = {
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_RAW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_OCV,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
- POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_COOL_TEMP,
- POWER_SUPPLY_PROP_WARM_TEMP,
- POWER_SUPPLY_PROP_RESISTANCE,
- POWER_SUPPLY_PROP_RESISTANCE_ID,
- POWER_SUPPLY_PROP_BATTERY_TYPE,
- POWER_SUPPLY_PROP_UPDATE_NOW,
- POWER_SUPPLY_PROP_ESR_COUNT,
- POWER_SUPPLY_PROP_VOLTAGE_MIN,
- POWER_SUPPLY_PROP_CYCLE_COUNT,
- POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
- POWER_SUPPLY_PROP_HI_POWER,
-};
-
-static int fg_power_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct fg_chip *chip = power_supply_get_drvdata(psy);
- bool vbatt_low_sts;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_BATTERY_TYPE:
- if (chip->battery_missing)
- val->strval = missing_batt_type;
- else if (chip->fg_restarting)
- val->strval = loading_batt_type;
- else
- val->strval = chip->batt_type;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = get_prop_capacity(chip);
- break;
- case POWER_SUPPLY_PROP_CAPACITY_RAW:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
- val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = chip->batt_max_voltage_uv;
- break;
- case POWER_SUPPLY_PROP_TEMP:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
- break;
- case POWER_SUPPLY_PROP_COOL_TEMP:
- val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
- break;
- case POWER_SUPPLY_PROP_WARM_TEMP:
- val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
- break;
- case POWER_SUPPLY_PROP_RESISTANCE:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
- break;
- case POWER_SUPPLY_PROP_ESR_COUNT:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
- break;
- case POWER_SUPPLY_PROP_CYCLE_COUNT:
- val->intval = fg_get_cycle_count(chip);
- break;
- case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
- val->intval = chip->cyc_ctr.id;
- break;
- case POWER_SUPPLY_PROP_RESISTANCE_ID:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
- break;
- case POWER_SUPPLY_PROP_UPDATE_NOW:
- val->intval = 0;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
- val->intval = (int)vbatt_low_sts;
- else
- val->intval = 1;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = chip->nom_cap_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = chip->learning_data.learned_cc_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = chip->learning_data.cc_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
- val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
- break;
- case POWER_SUPPLY_PROP_HI_POWER:
- val->intval = !!chip->bcl_lpm_disabled;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int correction_times[] = {
1470,
2940,
@@ -2853,11 +3446,8 @@
goto fail;
}
- if (chip->wa_flag & USE_CC_SOC_REG) {
- mutex_unlock(&chip->learning_data.learning_lock);
- fg_relax(&chip->capacity_learning_wakeup_source);
- return;
- }
+ if (chip->wa_flag & USE_CC_SOC_REG)
+ goto fail;
fg_mem_lock(chip);
@@ -2888,6 +3478,8 @@
pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
fail:
+ if (chip->wa_flag & USE_CC_SOC_REG)
+ fg_relax(&chip->capacity_learning_wakeup_source);
mutex_unlock(&chip->learning_data.learning_lock);
return;
@@ -2901,7 +3493,7 @@
{
int rc;
u8 reg[4];
- unsigned int temp, magnitude;
+ int temp;
rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
if (rc) {
@@ -2910,20 +3502,61 @@
}
temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
- magnitude = temp & CC_SOC_MAGNITUDE_MASK;
- if (temp & CC_SOC_NEGATIVE_BIT)
- *cc_soc = -1 * (~magnitude + 1);
- else
- *cc_soc = magnitude;
-
+ *cc_soc = sign_extend32(temp, 29);
return 0;
}
+static int fg_get_current_cc(struct fg_chip *chip)
+{
+ int cc_soc, rc;
+ int64_t current_capacity;
+
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ return chip->learning_data.cc_uah;
+
+ if (!chip->learning_data.learned_cc_uah)
+ return -EINVAL;
+
+ rc = fg_get_cc_soc(chip, &cc_soc);
+ if (rc < 0) {
+ pr_err("Failed to get cc_soc, rc=%d\n", rc);
+ return rc;
+ }
+
+ current_capacity = cc_soc * chip->learning_data.learned_cc_uah;
+ current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT);
+ return current_capacity;
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
static int fg_cap_learning_process_full_data(struct fg_chip *chip)
{
int cc_pc_val, rc = -EINVAL;
unsigned int cc_soc_delta_pc;
int64_t delta_cc_uah;
+ uint64_t temp;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ goto fail;
+ }
if (!chip->learning_data.active)
goto fail;
@@ -2940,9 +3573,8 @@
goto fail;
}
- cc_soc_delta_pc = DIV_ROUND_CLOSEST(
- abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
- * 100, FULL_PERCENT_28BIT);
+ temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val);
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT);
delta_cc_uah = div64_s64(
chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
@@ -2950,8 +3582,11 @@
chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
if (fg_debug_mask & FG_AGING)
- pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n",
cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.init_cc_pc_val,
+ delta_cc_uah,
+ chip->learning_data.learned_cc_uah,
chip->learning_data.cc_uah);
return 0;
@@ -3044,6 +3679,12 @@
{
int16_t cc_mah;
int rc;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ return;
+ }
cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
@@ -3065,6 +3706,12 @@
static void fg_cap_learning_post_process(struct fg_chip *chip)
{
int64_t max_inc_val, min_dec_val, old_cap;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ return;
+ }
max_inc_val = chip->learning_data.learned_cc_uah
* (1000 + chip->learning_data.max_increment);
@@ -3083,6 +3730,32 @@
chip->learning_data.learned_cc_uah =
chip->learning_data.cc_uah;
+ if (chip->learning_data.max_cap_limit) {
+ max_inc_val = (int64_t)chip->nom_cap_uah * (1000 +
+ chip->learning_data.max_cap_limit);
+ max_inc_val = div64_u64(max_inc_val, 1000);
+ if (chip->learning_data.cc_uah > max_inc_val) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learning capacity %lld goes above max limit %lld\n",
+ chip->learning_data.cc_uah,
+ max_inc_val);
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->learning_data.min_cap_limit) {
+ min_dec_val = (int64_t)chip->nom_cap_uah * (1000 -
+ chip->learning_data.min_cap_limit);
+ min_dec_val = div64_u64(min_dec_val, 1000);
+ if (chip->learning_data.cc_uah < min_dec_val) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learning capacity %lld goes below min limit %lld\n",
+ chip->learning_data.cc_uah,
+ min_dec_val);
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ }
+ }
+
fg_cap_learning_save_data(chip);
if (fg_debug_mask & FG_AGING)
pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
@@ -3142,7 +3815,7 @@
if (battery_soc * 100 / FULL_PERCENT_3B
> chip->learning_data.max_start_soc) {
if (fg_debug_mask & FG_AGING)
- pr_info("battery soc too low (%d < %d), aborting\n",
+ pr_info("battery soc too high (%d > %d), aborting\n",
battery_soc * 100 / FULL_PERCENT_3B,
chip->learning_data.max_start_soc);
fg_mem_release(chip);
@@ -3226,6 +3899,17 @@
}
fg_cap_learning_stop(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ /* reset SW_CC_SOC register to 100% upon charge_full */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Reset SW_CC_SOC to full value\n");
+ }
}
fail:
@@ -3323,7 +4007,14 @@
struct fg_chip,
status_change_work);
unsigned long current_time = 0;
- int cc_soc, rc, capacity = get_prop_capacity(chip);
+ int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip);
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery is missing\n");
+ return;
+ }
if (chip->esr_pulse_tune_en) {
fg_stay_awake(&chip->esr_extract_wakeup_source);
@@ -3343,19 +4034,34 @@
}
if (chip->status == POWER_SUPPLY_STATUS_FULL ||
chip->status == POWER_SUPPLY_STATUS_CHARGING) {
- if (!chip->vbat_low_irq_enabled) {
+ if (!chip->vbat_low_irq_enabled &&
+ !chip->use_vbat_low_empty_soc) {
enable_irq(chip->batt_irq[VBATT_LOW].irq);
enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
chip->vbat_low_irq_enabled = true;
}
+
+ if (!chip->full_soc_irq_enabled) {
+ enable_irq(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+ }
+
if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
fg_configure_soc(chip);
} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- if (chip->vbat_low_irq_enabled) {
+ if (chip->vbat_low_irq_enabled &&
+ !chip->use_vbat_low_empty_soc) {
disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
chip->vbat_low_irq_enabled = false;
}
+
+ if (chip->full_soc_irq_enabled) {
+ disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = false;
+ }
}
fg_cap_learning_check(chip);
schedule_work(&chip->update_esr_work);
@@ -3368,6 +4074,42 @@
}
if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+ /*
+ * Reset SW_CC_SOC to a value based off battery SOC when
+ * the device is discharging.
+ */
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ batt_soc = get_battery_soc_raw(chip);
+ if (!batt_soc)
+ return;
+
+ batt_soc = div64_s64((int64_t)batt_soc *
+ FULL_PERCENT_28BIT, FULL_PERCENT_3B);
+ rc = fg_mem_write(chip, (u8 *)&batt_soc,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Reset SW_CC_SOC to %x\n", batt_soc);
+ }
+
+ /*
+ * Schedule the update_temp_work whenever there is a status
+ * change. This is essential for applying the slope limiter
+ * coefficients when that feature is enabled.
+ */
+ if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ schedule_delayed_work(&chip->update_temp_work,
+ msecs_to_jiffies(0));
+ }
+
+ if (chip->dischg_gain.enable) {
+ fg_stay_awake(&chip->dischg_gain_wakeup_source);
+ schedule_work(&chip->dischg_gain_work);
+ }
+
+ get_current_time(&current_time);
/*
* When charging status changes, update SRAM parameters if it
@@ -3393,10 +4135,10 @@
}
if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
&& chip->safety_timer_expired) {
- chip->sw_cc_soc_data.delta_soc =
- DIV_ROUND_CLOSEST(abs(cc_soc -
- chip->sw_cc_soc_data.init_cc_soc)
- * 100, FULL_PERCENT_28BIT);
+ uint64_t delta_cc_soc = abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc);
+ chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
+ delta_cc_soc * 100, FULL_PERCENT_28BIT);
chip->sw_cc_soc_data.full_capacity =
chip->sw_cc_soc_data.delta_soc +
chip->sw_cc_soc_data.init_sys_soc;
@@ -3539,6 +4281,395 @@
return rc;
}
+static int fg_restore_cc_soc(struct fg_chip *chip)
+{
+ int rc;
+
+ if (!chip->use_last_cc_soc || !chip->last_cc_soc)
+ return 0;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
+
+ rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
+ fg_data[FG_DATA_CC_CHARGE].address, 4,
+ fg_data[FG_DATA_CC_CHARGE].offset, 0);
+ if (rc)
+ pr_err("failed to update CC_SOC rc=%d\n", rc);
+ else
+ chip->use_last_cc_soc = false;
+
+ return rc;
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+static int fg_restore_soc(struct fg_chip *chip)
+{
+ int rc;
+ u16 msoc;
+
+ if (chip->use_last_soc && chip->last_soc)
+ msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
+ FULL_SOC_RAW);
+ else
+ return 0;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restored soc: %d\n", msoc);
+
+ rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write M_SOC_REG rc=%d\n", rc);
+
+ return rc;
+}
+
+#define NOM_CAP_REG 0x4F4
+#define CAPACITY_DELTA_DECIPCT 500
+static int load_battery_aging_data(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 buffer[2];
+ int16_t cc_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read nominal capacitance: %d\n", rc);
+ goto out;
+ }
+
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
+ chip->nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+ * If the learned capacity is out of range, say by 50%
+ * from the nominal capacity, then overwrite the learned
+ * capacity with the nominal capacity.
+ */
+ if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("learned_cc_uah: %lld is higher than expected\n",
+ chip->learning_data.learned_cc_uah);
+ pr_info("Capping it to nominal:%d\n",
+ chip->nom_cap_uah);
+ }
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah,
+ 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n",
+ rc);
+ }
+ }
+out:
+ return rc;
+}
+
+static void fg_restore_battery_info(struct fg_chip *chip)
+{
+ int rc;
+ char buf[4] = {0, 0, 0, 0};
+
+ chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] *
+ FULL_SOC_RAW, FULL_CAPACITY);
+ chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+ chip->use_last_soc = true;
+ chip->use_last_cc_soc = true;
+ rc = fg_restore_soc(chip);
+ if (rc) {
+ pr_err("Error in restoring soc, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_restore_cc_soc(chip);
+ if (rc) {
+ pr_err("Error in restoring cc_soc, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_VINT_ERR].address,
+ fg_data[FG_DATA_VINT_ERR].len,
+ fg_data[FG_DATA_VINT_ERR].offset, 0);
+ if (rc) {
+ pr_err("Failed to write to VINT_ERR, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC];
+ rc = load_battery_aging_data(chip);
+ if (rc) {
+ pr_err("Failed to load battery aging data, rc:%d\n", rc);
+ goto out;
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restored battery info!\n");
+
+out:
+ return;
+}
+
+#define DELTA_BATT_TEMP 30
+static bool fg_validate_battery_info(struct fg_chip *chip)
+{
+ int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc;
+
+ for (i = 1; i < BATT_INFO_MAX; i++) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]);
+
+ if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) ||
+ chip->batt_info[i] == INT_MAX) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_info[%d]:%d is invalid\n", i,
+ chip->batt_info[i]);
+ return false;
+ }
+ }
+
+ batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000;
+ if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n",
+ batt_id_kohm,
+ chip->batt_info[BATT_INFO_RES_ID]);
+ return false;
+ }
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) >
+ DELTA_BATT_TEMP) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n",
+ batt_temp, chip->batt_info[BATT_INFO_TEMP]);
+ return false;
+ }
+
+ if (chip->batt_info[BATT_INFO_FCC] < 0) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_fcc cannot be %d\n",
+ chip->batt_info[BATT_INFO_FCC]);
+ return false;
+ }
+
+ batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000;
+ batt_soc = get_monotonic_soc_raw(chip);
+ if (batt_soc != 0 && batt_soc != FULL_SOC_RAW)
+ batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) *
+ (FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1;
+
+ if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000)
+ delta_pct = DIV_ROUND_CLOSEST(abs(batt_volt_mv -
+ chip->batt_info[BATT_INFO_VOLTAGE]) * 100,
+ chip->batt_max_voltage_uv / 1000);
+ else
+ delta_pct = abs(batt_soc - chip->batt_info[BATT_INFO_SOC]);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n",
+ *chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv,
+ batt_soc, delta_pct);
+
+ if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n",
+ delta_pct, *chip->batt_range_pct);
+ return false;
+ }
+
+ return true;
+}
+
+static int fg_set_battery_info(struct fg_chip *chip, int val)
+{
+ if (chip->batt_info_id < 0 ||
+ chip->batt_info_id >= BATT_INFO_MAX) {
+ pr_err("Invalid batt_info_id %d\n", chip->batt_info_id);
+ chip->batt_info_id = 0;
+ return -EINVAL;
+ }
+
+ if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Notified from userspace\n");
+ if (chip->batt_info_restore && !chip->ima_error_handling) {
+ if (!fg_validate_battery_info(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Validating battery info failed\n");
+ } else {
+ fg_restore_battery_info(chip);
+ }
+ }
+ }
+
+ chip->batt_info[chip->batt_info_id] = val;
+ return 0;
+}
+
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+ POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+ POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+ POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+ POWER_SUPPLY_PROP_BATTERY_INFO,
+ POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = fg_get_current_cc(chip);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ val->intval = !!chip->bcl_lpm_disabled;
+ break;
+ case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+ val->intval = !!chip->soc_reporting_ready;
+ break;
+ case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+ val->intval = !chip->allow_false_negative_isense;
+ break;
+ case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+ val->intval = chip->use_soft_jeita_irq;
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ if (chip->batt_info_id < 0 ||
+ chip->batt_info_id >= BATT_INFO_MAX)
+ return -EINVAL;
+ val->intval = chip->batt_info[chip->batt_info_id];
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+ val->intval = chip->batt_info_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int fg_power_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
@@ -3557,6 +4688,67 @@
if (val->intval)
update_sram_data(chip, &unused);
break;
+ case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+ rc = set_prop_ignore_false_negative_isense(chip, !!val->intval);
+ if (rc)
+ pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n",
+ rc);
+ else
+ chip->allow_false_negative_isense = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+ if (chip->use_soft_jeita_irq == !!val->intval) {
+ pr_debug("JEITA irq %s, ignore!\n",
+ chip->use_soft_jeita_irq ?
+ "enabled" : "disabled");
+ break;
+ }
+ chip->use_soft_jeita_irq = !!val->intval;
+ if (chip->use_soft_jeita_irq) {
+ if (chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+ enable_irq(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled =
+ false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+ enable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].wakeup = true;
+ }
+ if (chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+ enable_irq(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+ enable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].wakeup = true;
+ }
+ } else {
+ if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+ disable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].wakeup = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+ disable_irq_nosync(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+ }
+ if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+ disable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].wakeup = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+ disable_irq_nosync(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+ }
+ }
+ break;
case POWER_SUPPLY_PROP_STATUS:
chip->prev_status = chip->status;
chip->status = val->intval;
@@ -3599,6 +4791,12 @@
schedule_work(&chip->bcl_hi_power_work);
}
break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ rc = fg_set_battery_info(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+ chip->batt_info_id = val->intval;
+ break;
default:
return -EINVAL;
};
@@ -3613,6 +4811,8 @@
case POWER_SUPPLY_PROP_COOL_TEMP:
case POWER_SUPPLY_PROP_WARM_TEMP:
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
return 1;
default:
break;
@@ -3807,21 +5007,197 @@
fg_relax(&chip->gain_comp_wakeup_source);
}
-#define BATT_MISSING_STS BIT(6)
-static bool is_battery_missing(struct fg_chip *chip)
+static void cc_soc_store_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ cc_soc_store_work);
+ int cc_soc_pct;
+
+ if (!chip->nom_cap_uah) {
+ pr_err("nom_cap_uah zero!\n");
+ fg_relax(&chip->cc_soc_wakeup_source);
+ return;
+ }
+
+ cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ cc_soc_pct = div64_s64(cc_soc_pct * 100,
+ chip->nom_cap_uah);
+ chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct,
+ chip->last_cc_soc);
+
+ if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 &&
+ cc_soc_pct >= chip->cc_soc_limit_pct)) {
+ pr_err("CC_SOC out of range\n");
+ fg_check_ima_error_handling(chip);
+ }
+
+ fg_relax(&chip->cc_soc_wakeup_source);
+}
+
+#define HARD_JEITA_ALARM_CHECK_NS 10000000000
+static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm,
+ struct fg_chip, hard_jeita_alarm);
+ int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ u8 regval;
+ bool batt_hot, batt_cold;
+ union power_supply_propval val = {0, };
+
+ if (!is_usb_present(chip)) {
+ pr_debug("USB plugged out, stop the timer!\n");
+ return ALARMTIMER_NORESTART;
+ }
+
+ rc = fg_read(chip, &regval, BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("read batt_sts failed, rc=%d\n", rc);
+ goto recheck;
+ }
+
+ batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
+ batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
+ if (batt_hot && batt_cold) {
+ pr_debug("Hot && cold can't co-exist\n");
+ goto recheck;
+ }
+
+ if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
+ pr_debug("battery JEITA state not changed, ignore\n");
+ goto recheck;
+ }
+
+ if (batt_cold != chip->batt_cold) {
+ /* cool --> cold */
+ if (chip->batt_cool) {
+ chip->batt_cool = false;
+ chip->batt_cold = true;
+ health = POWER_SUPPLY_HEALTH_COLD;
+ } else if (chip->batt_cold) { /* cold --> cool */
+ chip->batt_cool = true;
+ chip->batt_cold = false;
+ health = POWER_SUPPLY_HEALTH_COOL;
+ }
+ }
+
+ if (batt_hot != chip->batt_hot) {
+ /* warm --> hot */
+ if (chip->batt_warm) {
+ chip->batt_warm = false;
+ chip->batt_hot = true;
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else if (chip->batt_hot) { /* hot --> warm */
+ chip->batt_hot = false;
+ chip->batt_warm = true;
+ health = POWER_SUPPLY_HEALTH_WARM;
+ }
+ }
+
+ if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
+ pr_debug("FG report battery health: %d\n", health);
+ val.intval = health;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ if (rc)
+ pr_err("Set batt_psy health: %d failed\n", health);
+ }
+
+recheck:
+ alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ return ALARMTIMER_RESTART;
+}
+
+#define BATT_SOFT_COLD_STS BIT(0)
+#define BATT_SOFT_HOT_STS BIT(1)
+static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
{
int rc;
- u8 fg_batt_sts;
+ struct fg_chip *chip = _chip;
+ u8 regval;
+ bool batt_warm;
+ union power_supply_propval val = {0, };
- rc = fg_read(chip, &fg_batt_sts,
- INT_RT_STS(chip->batt_base), 1);
+ if (!is_charger_available(chip))
+ return IRQ_HANDLED;
+
+ rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
INT_RT_STS(chip->batt_base), rc);
- return false;
+ return IRQ_HANDLED;
}
- return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+ batt_warm = !!(regval & BATT_SOFT_HOT_STS);
+ if (chip->batt_warm == batt_warm) {
+ pr_debug("warm state not change, ignore!\n");
+ return IRQ_HANDLED;
+ }
+
+ chip->batt_warm = batt_warm;
+ if (batt_warm) {
+ val.intval = POWER_SUPPLY_HEALTH_WARM;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* kick the alarm timer for hard hot polling */
+ alarm_start_relative(&chip->hard_jeita_alarm,
+ ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ } else {
+ val.intval = POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* cancel the alarm timer */
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
+{
+ int rc;
+ struct fg_chip *chip = _chip;
+ u8 regval;
+ bool batt_cool;
+ union power_supply_propval val = {0, };
+
+ if (!is_charger_available(chip))
+ return IRQ_HANDLED;
+
+ rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return IRQ_HANDLED;
+ }
+
+ batt_cool = !!(regval & BATT_SOFT_COLD_STS);
+ if (chip->batt_cool == batt_cool) {
+ pr_debug("cool state not change, ignore\n");
+ return IRQ_HANDLED;
+ }
+
+ chip->batt_cool = batt_cool;
+ if (batt_cool) {
+ val.intval = POWER_SUPPLY_HEALTH_COOL;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* kick the alarm timer for hard cold polling */
+ alarm_start_relative(&chip->hard_jeita_alarm,
+ ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ } else {
+ val.intval = POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* cancel the alarm timer */
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ }
+
+ return IRQ_HANDLED;
}
#define SOC_FIRST_EST_DONE BIT(5)
@@ -3841,21 +5217,40 @@
return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
}
+#define FG_EMPTY_DEBOUNCE_MS 1500
static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
{
struct fg_chip *chip = _chip;
- int rc;
bool vbatt_low_sts;
if (fg_debug_mask & FG_IRQS)
pr_info("vbatt-low triggered\n");
- if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
- rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
- if (rc) {
- pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ /* handle empty soc based on vbatt-low interrupt */
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
goto out;
+
+ if (vbatt_low_sts) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("Vbatt is low\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("Vbatt is high\n");
+ chip->soc_empty = false;
}
+ goto out;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ goto out;
if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
if (fg_debug_mask & FG_IRQS)
pr_info("disabling vbatt_low irq\n");
@@ -3876,8 +5271,10 @@
bool batt_missing = is_battery_missing(chip);
if (batt_missing) {
+ fg_cap_learning_stop(chip);
chip->battery_missing = true;
chip->profile_loaded = false;
+ chip->soc_reporting_ready = false;
chip->batt_type = default_batt_type;
mutex_lock(&chip->cyc_ctr.lock);
if (fg_debug_mask & FG_IRQS)
@@ -3885,17 +5282,10 @@
clear_cycle_counter(chip);
mutex_unlock(&chip->cyc_ctr.lock);
} else {
- if (!chip->use_otp_profile) {
- reinit_completion(&chip->batt_id_avail);
- reinit_completion(&chip->first_soc_done);
- schedule_delayed_work(&chip->batt_profile_init, 0);
- cancel_delayed_work(&chip->update_sram_data);
- schedule_delayed_work(
- &chip->update_sram_data,
- msecs_to_jiffies(0));
- } else {
+ if (!chip->use_otp_profile)
+ fg_handle_battery_insertion(chip);
+ else
chip->battery_missing = false;
- }
}
if (fg_debug_mask & FG_IRQS)
@@ -3943,7 +5333,7 @@
{
struct fg_chip *chip = _chip;
u8 soc_rt_sts;
- int rc;
+ int rc, msoc;
rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
if (rc) {
@@ -3954,6 +5344,37 @@
if (fg_debug_mask & FG_IRQS)
pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (chip->dischg_gain.enable) {
+ fg_stay_awake(&chip->dischg_gain_wakeup_source);
+ schedule_work(&chip->dischg_gain_work);
+ }
+
+ if (chip->soc_slope_limiter_en) {
+ fg_stay_awake(&chip->slope_limit_wakeup_source);
+ schedule_work(&chip->slope_limiter_work);
+ }
+
+ /* Backup last soc every delta soc interrupt */
+ chip->use_last_soc = false;
+ if (fg_reset_on_lockup) {
+ if (!chip->ima_error_handling)
+ chip->last_soc = get_monotonic_soc_raw(chip);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d\n", chip->last_soc);
+
+ fg_stay_awake(&chip->cc_soc_wakeup_source);
+ schedule_work(&chip->cc_soc_store_work);
+ }
+
+ if (chip->use_vbat_low_empty_soc) {
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0 || chip->soc_empty) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ }
+ }
+
schedule_work(&chip->battery_age_work);
if (chip->power_supply_registered)
@@ -3988,7 +5409,6 @@
return IRQ_HANDLED;
}
-#define FG_EMPTY_DEBOUNCE_MS 1500
static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
{
struct fg_chip *chip = _chip;
@@ -4100,16 +5520,15 @@
fg_relax(&chip->resume_soc_wakeup_source);
}
-
#define OCV_COEFFS_START_REG 0x4C0
#define OCV_JUNCTION_REG 0x4D8
-#define NOM_CAP_REG 0x4F4
#define CUTOFF_VOLTAGE_REG 0x40C
#define RSLOW_CFG_REG 0x538
#define RSLOW_CFG_OFFSET 2
#define RSLOW_THRESH_REG 0x52C
#define RSLOW_THRESH_OFFSET 0
-#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RS_TO_RSLOW_CHG_OFFSET 2
+#define RS_TO_RSLOW_DISCHG_OFFSET 0
#define RSLOW_COMP_REG 0x528
#define RSLOW_COMP_C1_OFFSET 0
#define RSLOW_COMP_C2_OFFSET 2
@@ -4117,7 +5536,6 @@
{
u8 buffer[24];
int rc, i;
- int16_t cc_mah;
fg_mem_lock(chip);
rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
@@ -4138,30 +5556,21 @@
chip->ocv_coeffs[8], chip->ocv_coeffs[9],
chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
}
- rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
- chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
- rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
- chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0);
if (rc) {
pr_err("Failed to read ocv junctions: %d\n", rc);
goto done;
}
- rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ chip->ocv_junction_p2p3 = buffer[1] * 100 / 255;
+
+ rc = load_battery_aging_data(chip);
if (rc) {
- pr_err("Failed to read nominal capacitance: %d\n", rc);
+ pr_err("Failed to load battery aging data, rc:%d\n", rc);
goto done;
}
- chip->nom_cap_uah = bcap_uah_2b(buffer);
- chip->actual_cap_uah = chip->nom_cap_uah;
- if (chip->learning_data.learned_cc_uah == 0) {
- chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
- fg_cap_learning_save_data(chip);
- } else if (chip->learning_data.feedback_on) {
- cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
- rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
- if (rc)
- pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
- }
+
rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
if (rc) {
pr_err("Failed to read cutoff voltage: %d\n", rc);
@@ -4188,9 +5597,9 @@
}
chip->rslow_comp.rslow_thr = buffer[0];
rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
- RSLOW_THRESH_OFFSET, 0);
+ RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
- pr_err("unable to read rs to rslow: %d\n", rc);
+ pr_err("unable to read rs to rslow_chg: %d\n", rc);
goto done;
}
memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
@@ -4207,6 +5616,68 @@
return rc;
}
+static int fg_update_batt_rslow_settings(struct fg_chip *chip)
+{
+ int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm;
+ u8 buffer[2];
+ int rc;
+
+ rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read battery_esr: %d\n", rc);
+ goto done;
+ }
+ batt_esr = half_float(buffer);
+
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_DISCHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow dischg: %d\n", rc);
+ goto done;
+ }
+ rs_to_rslow_dischg = half_float(buffer);
+
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_CHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow chg: %d\n", rc);
+ goto done;
+ }
+ rs_to_rslow_chg = half_float(buffer);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n",
+ rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr);
+
+ rconn_uohm = chip->rconn_mohm * 1000;
+ rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr,
+ batt_esr + rconn_uohm);
+ rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr,
+ batt_esr + rconn_uohm);
+
+ half_float_to_buffer(rs_to_rslow_chg, buffer);
+ rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_CHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs_to_rslow_chg: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(rs_to_rslow_dischg, buffer);
+ rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_DISCHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs_to_rslow_dischg: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n",
+ rs_to_rslow_chg, rs_to_rslow_dischg);
+done:
+ return rc;
+}
+
#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
#define RSLOW_THRESH_FULL_VAL 0xFF
@@ -4233,7 +5704,7 @@
half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
rc = fg_mem_write(chip, buffer,
- TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
pr_err("unable to write rs to rslow: %d\n", rc);
goto done;
@@ -4286,7 +5757,7 @@
}
rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
- TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
pr_err("unable to write rs to rslow: %d\n", rc);
goto done;
@@ -4510,6 +5981,58 @@
fg_relax(&chip->esr_extract_wakeup_source);
}
+#define KI_COEFF_MEDC_REG 0x400
+#define KI_COEFF_MEDC_OFFSET 0
+#define KI_COEFF_HIGHC_REG 0x404
+#define KI_COEFF_HIGHC_OFFSET 0
+#define DEFAULT_MEDC_VOLTAGE_GAIN 3
+#define DEFAULT_HIGHC_VOLTAGE_GAIN 2
+static void discharge_gain_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ dischg_gain_work);
+ u8 buf[2];
+ int capacity, rc, i;
+ int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN;
+ int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN;
+
+ capacity = get_prop_capacity(chip);
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) {
+ if (capacity <= chip->dischg_gain.soc[i]) {
+ medc_val = chip->dischg_gain.medc_gain[i];
+ highc_val = chip->dischg_gain.highc_gain[i];
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n",
+ capacity, medc_val, highc_val);
+
+ medc_val *= MICRO_UNIT;
+ half_float_to_buffer(medc_val, buf);
+ rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2,
+ KI_COEFF_MEDC_OFFSET, 0);
+ if (rc)
+ pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0],
+ buf[1]);
+
+ highc_val *= MICRO_UNIT;
+ half_float_to_buffer(highc_val, buf);
+ rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2,
+ KI_COEFF_HIGHC_OFFSET, 0);
+ if (rc)
+ pr_err("Couldn't write to ki_coeff_highc_reg, rc=%d\n", rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0],
+ buf[1]);
+
+ fg_relax(&chip->dischg_gain_wakeup_source);
+}
+
#define LOW_LATENCY BIT(6)
#define BATT_PROFILE_OFFSET 0x4C0
#define PROFILE_INTEGRITY_REG 0x53C
@@ -4529,7 +6052,7 @@
pr_info("restarting fuel gauge...\n");
try_again:
- if (write_profile) {
+ if (write_profile && !chip->ima_error_handling) {
if (!chip->charging_disabled) {
pr_err("Charging not yet disabled!\n");
return -EINVAL;
@@ -4770,7 +6293,8 @@
#define BATTERY_PSY_WAIT_MS 2000
static int fg_batt_profile_init(struct fg_chip *chip)
{
- int rc = 0, ret, len, batt_id;
+ int rc = 0, ret;
+ int len, batt_id;
struct device_node *node = chip->pdev->dev.of_node;
struct device_node *batt_node, *profile_node;
const char *data, *batt_type_str;
@@ -4792,6 +6316,19 @@
goto no_profile;
}
+ /* Check whether the charger is ready */
+ if (!is_charger_available(chip))
+ goto reschedule;
+
+ /* Disable charging for a FG cycle before calculating vbat_in_range */
+ if (!chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, false);
+ if (rc)
+ pr_err("Failed to disable charging, rc=%d\n", rc);
+
+ goto update;
+ }
+
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_warn("No available batterydata, using OTP defaults\n");
@@ -4808,8 +6345,12 @@
fg_batt_type);
if (IS_ERR_OR_NULL(profile_node)) {
rc = PTR_ERR(profile_node);
- pr_err("couldn't find profile handle %d\n", rc);
- goto no_profile;
+ if (rc == -EPROBE_DEFER) {
+ goto reschedule;
+ } else {
+ pr_err("couldn't find profile handle rc=%d\n", rc);
+ goto no_profile;
+ }
}
/* read rslow compensation values if they're available */
@@ -4903,18 +6444,6 @@
goto no_profile;
}
- /* Check whether the charger is ready */
- if (!is_charger_available(chip))
- goto reschedule;
-
- /* Disable charging for a FG cycle before calculating vbat_in_range */
- if (!chip->charging_disabled) {
- rc = set_prop_enable_charging(chip, false);
- if (rc)
- pr_err("Failed to disable charging, rc=%d\n", rc);
-
- goto reschedule;
- }
vbat_in_range = get_vbat_est_diff(chip)
< settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
@@ -4956,11 +6485,7 @@
chip->batt_profile, len, false);
}
- if (chip->power_supply_registered)
- power_supply_changed(chip->bms_psy);
-
memcpy(chip->batt_profile, data, len);
-
chip->batt_profile_len = len;
if (fg_debug_mask & FG_STATUS)
@@ -4995,6 +6520,11 @@
}
}
+ if (chip->rconn_mohm > 0) {
+ rc = fg_update_batt_rslow_settings(chip);
+ if (rc)
+ pr_err("Error in updating ESR, rc=%d\n", rc);
+ }
done:
if (chip->charging_disabled) {
rc = set_prop_enable_charging(chip, true);
@@ -5008,8 +6538,22 @@
chip->batt_type = fg_batt_type;
else
chip->batt_type = batt_type_str;
+
+ if (chip->first_profile_loaded && fg_reset_on_lockup) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restoring SRAM registers\n");
+ rc = fg_backup_sram_registers(chip, false);
+ if (rc)
+ pr_err("Couldn't restore sram registers\n");
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+ }
+
chip->first_profile_loaded = true;
chip->profile_loaded = true;
+ chip->soc_reporting_ready = true;
chip->battery_missing = is_battery_missing(chip);
update_chg_iterm(chip);
update_cc_cv_setpoint(chip);
@@ -5025,8 +6569,10 @@
fg_relax(&chip->profile_wakeup_source);
pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
fg_data[FG_DATA_VOLTAGE].value);
+ complete_all(&chip->fg_reset_done);
return rc;
no_profile:
+ chip->soc_reporting_ready = true;
if (chip->charging_disabled) {
rc = set_prop_enable_charging(chip, true);
if (rc)
@@ -5039,14 +6585,15 @@
power_supply_changed(chip->bms_psy);
fg_relax(&chip->profile_wakeup_source);
return rc;
-reschedule:
- schedule_delayed_work(
- &chip->batt_profile_init,
- msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+update:
cancel_delayed_work(&chip->update_sram_data);
schedule_delayed_work(
&chip->update_sram_data,
msecs_to_jiffies(0));
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
fg_relax(&chip->profile_wakeup_source);
return 0;
}
@@ -5056,14 +6603,41 @@
struct fg_chip *chip = container_of(work,
struct fg_chip,
check_empty_work.work);
+ bool vbatt_low_sts;
+ int msoc;
- if (fg_is_batt_empty(chip)) {
+ /* handle empty soc based on vbatt-low interrupt */
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ goto out;
+
+ msoc = get_monotonic_soc_raw(chip);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts,
+ msoc);
+ if (vbatt_low_sts || (msoc == 0))
+ chip->soc_empty = true;
+ else
+ chip->soc_empty = false;
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ } else if (fg_is_batt_empty(chip)) {
if (fg_debug_mask & FG_STATUS)
pr_info("EMPTY SOC high\n");
chip->soc_empty = true;
if (chip->power_supply_registered)
power_supply_changed(chip->bms_psy);
}
+
+out:
fg_relax(&chip->empty_check_wakeup_source);
}
@@ -5103,7 +6677,7 @@
int rc;
u8 buffer[3];
int bsoc;
- int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
bool disable = false;
u8 reg;
@@ -5318,6 +6892,98 @@
} \
} while (0)
+static int fg_dischg_gain_dt_init(struct fg_chip *chip)
+{
+ struct device_node *node = chip->pdev->dev.of_node;
+ struct property *prop;
+ int i, rc = 0;
+ size_t size;
+
+ prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-voltage-gain-soc not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("Voltage gain SOC specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-voltage-gain-soc failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ for (i = 0; i < VOLT_GAIN_MAX; i++) {
+ if (chip->dischg_gain.soc[i] > 100) {
+ pr_err("Incorrect dischg-voltage-gain-soc\n");
+ goto out;
+ }
+ }
+
+ prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-med-voltage-gain not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("med-voltage-gain specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain,
+ size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-med-voltage-gain failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-high-voltage-gain not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("high-voltage-gain specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-high-voltage-gain",
+ chip->dischg_gain.highc_gain, size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-high-voltage-gain failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS) {
+ for (i = 0; i < VOLT_GAIN_MAX; i++)
+ pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n",
+ chip->dischg_gain.soc[i],
+ chip->dischg_gain.medc_gain[i],
+ chip->dischg_gain.highc_gain[i]);
+ }
+ return 0;
+out:
+ chip->dischg_gain.enable = false;
+ return rc;
+}
+
#define DEFAULT_EVALUATION_CURRENT_MA 1000
static int fg_of_init(struct fg_chip *chip)
{
@@ -5395,6 +7061,10 @@
"cl-max-start-capacity", rc, 15);
OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
"cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->learning_data.max_cap_limit,
+ "cl-max-limit-deciperc", rc, 0);
+ OF_READ_PROPERTY(chip->learning_data.min_cap_limit,
+ "cl-min-limit-deciperc", rc, 0);
OF_READ_PROPERTY(chip->evaluation_current,
"aging-eval-current-ma", rc,
DEFAULT_EVALUATION_CURRENT_MA);
@@ -5455,6 +7125,77 @@
chip->esr_pulse_tune_en = of_property_read_bool(node,
"qcom,esr-pulse-tuning-en");
+ chip->soc_slope_limiter_en = of_property_read_bool(node,
+ "qcom,fg-control-slope-limiter");
+ if (chip->soc_slope_limiter_en) {
+ OF_READ_PROPERTY(chip->slope_limit_temp,
+ "fg-slope-limit-temp-threshold", rc,
+ SLOPE_LIMIT_TEMP_THRESHOLD);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+ "fg-slope-limit-low-temp-chg", rc,
+ SLOPE_LIMIT_LOW_TEMP_CHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+ "fg-slope-limit-high-temp-chg", rc,
+ SLOPE_LIMIT_HIGH_TEMP_CHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+ "fg-slope-limit-low-temp-dischg", rc,
+ SLOPE_LIMIT_LOW_TEMP_DISCHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE],
+ "fg-slope-limit-high-temp-dischg", rc,
+ SLOPE_LIMIT_HIGH_TEMP_DISCHG);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n",
+ chip->slope_limit_temp,
+ chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+ chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+ chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+ chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]);
+ }
+
+ OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0);
+
+ chip->dischg_gain.enable = of_property_read_bool(node,
+ "qcom,fg-dischg-voltage-gain-ctrl");
+ if (chip->dischg_gain.enable) {
+ rc = fg_dischg_gain_dt_init(chip);
+ if (rc) {
+ pr_err("Error in reading dischg_gain parameters, rc=%d\n",
+ rc);
+ rc = 0;
+ }
+ }
+
+ chip->use_vbat_low_empty_soc = of_property_read_bool(node,
+ "qcom,fg-use-vbat-low-empty-soc");
+
+ OF_READ_PROPERTY(chip->batt_temp_low_limit,
+ "fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT);
+
+ OF_READ_PROPERTY(chip->batt_temp_high_limit,
+ "fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n",
+ chip->batt_temp_low_limit, chip->batt_temp_high_limit);
+
+ OF_READ_PROPERTY(chip->cc_soc_limit_pct, "fg-cc-soc-limit-pct", rc, 0);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct);
+
+ chip->batt_info_restore = of_property_read_bool(node,
+ "qcom,fg-restore-batt-info");
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n",
+ chip->batt_info_restore, fg_batt_valid_ocv,
+ fg_batt_range_pct);
+
return rc;
}
@@ -5528,15 +7269,22 @@
chip->soc_irq[FULL_SOC].irq, rc);
return rc;
}
- rc = devm_request_irq(chip->dev,
- chip->soc_irq[EMPTY_SOC].irq,
- fg_empty_soc_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "empty-soc", chip);
- if (rc < 0) {
- pr_err("Can't request %d empty-soc: %d\n",
- chip->soc_irq[EMPTY_SOC].irq, rc);
- return rc;
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+
+ if (!chip->use_vbat_low_empty_soc) {
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq,
+ rc);
+ return rc;
+ }
}
rc = devm_request_irq(chip->dev,
chip->soc_irq[DELTA_SOC].irq,
@@ -5558,8 +7306,8 @@
}
enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
- enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
- enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ if (!chip->use_vbat_low_empty_soc)
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
break;
case FG_MEMIF:
chip->mem_irq[FG_MEM_AVAIL].irq
@@ -5581,8 +7329,53 @@
}
break;
case FG_BATT:
- chip->batt_irq[BATT_MISSING].irq
- = of_irq_get_byname(child, "batt-missing");
+ chip->batt_irq[JEITA_SOFT_COLD].irq =
+ of_irq_get_byname(child, "soft-cold");
+ if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) {
+ pr_err("Unable to get soft-cold irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[JEITA_SOFT_COLD].irq,
+ NULL,
+ fg_jeita_soft_cold_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "soft-cold", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d soft-cold: %d\n",
+ chip->batt_irq[JEITA_SOFT_COLD].irq,
+ rc);
+ return rc;
+ }
+ disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+ chip->batt_irq[JEITA_SOFT_HOT].irq =
+ of_irq_get_byname(child, "soft-hot");
+ if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) {
+ pr_err("Unable to get soft-hot irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[JEITA_SOFT_HOT].irq,
+ NULL,
+ fg_jeita_soft_hot_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "soft-hot", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d soft-hot: %d\n",
+ chip->batt_irq[JEITA_SOFT_HOT].irq, rc);
+ return rc;
+ }
+ disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+ chip->batt_irq[BATT_MISSING].irq =
+ of_irq_get_byname(child, "batt-missing");
if (chip->batt_irq[BATT_MISSING].irq < 0) {
pr_err("Unable to get batt-missing irq\n");
rc = -EINVAL;
@@ -5619,8 +7412,14 @@
chip->batt_irq[VBATT_LOW].irq, rc);
return rc;
}
- disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
- chip->vbat_low_irq_enabled = false;
+ if (chip->use_vbat_low_empty_soc) {
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ } else {
+ disable_irq_nosync(
+ chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
break;
case FG_ADC:
break;
@@ -5630,17 +7429,22 @@
}
}
+ chip->irqs_enabled = true;
return rc;
}
-static void fg_cleanup(struct fg_chip *chip)
+static void fg_cancel_all_works(struct fg_chip *chip)
{
+ cancel_delayed_work_sync(&chip->check_sanity_work);
cancel_delayed_work_sync(&chip->update_sram_data);
cancel_delayed_work_sync(&chip->update_temp_work);
cancel_delayed_work_sync(&chip->update_jeita_setting);
cancel_delayed_work_sync(&chip->check_empty_work);
cancel_delayed_work_sync(&chip->batt_profile_init);
alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ if (!chip->ima_error_handling)
+ cancel_work_sync(&chip->ima_error_recovery_work);
cancel_work_sync(&chip->rslow_comp_work);
cancel_work_sync(&chip->set_resume_soc_work);
cancel_work_sync(&chip->fg_cap_learning_work);
@@ -5652,12 +7456,23 @@
cancel_work_sync(&chip->gain_comp_work);
cancel_work_sync(&chip->init_work);
cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
cancel_work_sync(&chip->esr_extract_config_work);
+ cancel_work_sync(&chip->slope_limiter_work);
+ cancel_work_sync(&chip->dischg_gain_work);
+ cancel_work_sync(&chip->cc_soc_store_work);
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ fg_cancel_all_works(chip);
+ power_supply_unregister(chip->bms_psy);
mutex_destroy(&chip->rslow_comp.lock);
mutex_destroy(&chip->rw_lock);
mutex_destroy(&chip->cyc_ctr.lock);
mutex_destroy(&chip->learning_data.learning_lock);
mutex_destroy(&chip->sysfs_restart_lock);
+ mutex_destroy(&chip->ima_recovery_lock);
wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
wakeup_source_trash(&chip->empty_check_wakeup_source.source);
wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -5667,6 +7482,11 @@
wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+ wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+ wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+ wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->sanity_wakeup_source.source);
}
static int fg_remove(struct platform_device *pdev)
@@ -6155,12 +7975,13 @@
return 0;
}
-#define FG_ALG_SYSCTL_1 0x4B0
-#define SOC_CNFG 0x450
-#define SOC_DELTA_OFFSET 3
-#define DELTA_SOC_PERCENT 1
-#define I_TERM_QUAL_BIT BIT(1)
-#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define ALERT_CFG_OFFSET 3
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
#define KI_COEFF_PRED_FULL_ADDR 0x408
#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
@@ -6168,6 +7989,12 @@
#define FG_ADC_CONFIG_REG 0x4B8
#define FG_BCL_CONFIG_OFFSET 0x3
#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
+#define IRQ_USE_VOLTAGE_HYST_BIT BIT(0)
+#define EMPTY_FROM_VOLTAGE_BIT BIT(1)
+#define EMPTY_FROM_SOC_BIT BIT(2)
+#define EMPTY_SOC_IRQ_MASK (IRQ_USE_VOLTAGE_HYST_BIT | \
+ EMPTY_FROM_SOC_BIT | \
+ EMPTY_FROM_VOLTAGE_BIT)
static int fg_common_hw_init(struct fg_chip *chip)
{
int rc;
@@ -6176,8 +8003,9 @@
update_iterm(chip);
update_cutoff_voltage(chip);
- update_irq_volt_empty(chip);
update_bcl_thresholds(chip);
+ if (!chip->use_vbat_low_empty_soc)
+ update_irq_volt_empty(chip);
resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
if (resume_soc_raw > 0) {
@@ -6207,6 +8035,11 @@
return rc;
}
+ /* Override the voltage threshold for vbatt_low with empty_volt */
+ if (chip->use_vbat_low_empty_soc)
+ settings[FG_MEM_BATT_LOW].value =
+ settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
settings[FG_MEM_BATT_LOW].offset);
@@ -6274,20 +8107,41 @@
if (fg_debug_mask & FG_STATUS)
pr_info("imptr_pulse_slow is %sabled\n",
chip->imptr_pulse_slow_en ? "en" : "dis");
+ }
- rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
- 0);
- if (rc) {
- pr_err("unable to read rslow cfg: %d\n", rc);
- return rc;
- }
+ rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ return rc;
+ }
- if (val & RSLOW_CFG_ON_VAL)
- chip->rslow_comp.active = true;
+ if (val & RSLOW_CFG_ON_VAL)
+ chip->rslow_comp.active = true;
- if (fg_debug_mask & FG_STATUS)
- pr_info("rslow_comp active is %sabled\n",
- chip->rslow_comp.active ? "en" : "dis");
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rslow_comp active is %sabled\n",
+ chip->rslow_comp.active ? "en" : "dis");
+
+ /*
+ * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq
+ * trigger again.
+ */
+ rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+ 0, ALERT_CFG_OFFSET);
+ if (rc) {
+ pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Wait for a FG cycle before enabling empty soc irq configuration */
+ msleep(FG_CYCLE_MS);
+
+ rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+ EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET);
+ if (rc) {
+ pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+ return rc;
}
return 0;
@@ -6414,12 +8268,13 @@
/* Setup workaround flag based on PMIC type */
if (fg_sense_type == INTERNAL_CURRENT_SENSE)
chip->wa_flag |= IADC_GAIN_COMP_WA;
- if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1)
chip->wa_flag |= USE_CC_SOC_REG;
break;
case PMI8950:
case PMI8937:
+ case PMI8940:
rc = fg_8950_hw_init(chip);
/* Setup workaround flag based on PMIC type */
chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
@@ -6438,12 +8293,223 @@
return rc;
}
+static int fg_init_iadc_config(struct fg_chip *chip)
+{
+ u8 reg[2];
+ int rc;
+
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ return rc;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ return rc;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ return 0;
+}
+
+#define EN_WR_FGXCT_PRD BIT(6)
+#define EN_RD_FGXCT_PRD BIT(5)
+#define FG_RESTART_TIMEOUT_MS 12000
+static void ima_error_recovery_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ ima_error_recovery_work);
+ bool tried_again = false;
+ int rc;
+ u8 buf[4] = {0, 0, 0, 0};
+
+ fg_stay_awake(&chip->fg_reset_wakeup_source);
+ mutex_lock(&chip->ima_recovery_lock);
+ if (!chip->ima_error_handling) {
+ pr_err("Scheduled by mistake?\n");
+ mutex_unlock(&chip->ima_recovery_lock);
+ fg_relax(&chip->fg_reset_wakeup_source);
+ return;
+ }
+
+ /*
+ * SOC should be read and used until the error recovery completes.
+ * Without this, there could be a fluctuation in SOC values notified
+ * to the userspace.
+ */
+ chip->use_last_soc = true;
+
+ /* Block SRAM access till FG reset is complete */
+ chip->block_sram_access = true;
+
+ /* Release the mutex to avoid deadlock while cancelling the works */
+ mutex_unlock(&chip->ima_recovery_lock);
+
+ /* Cancel all the works */
+ fg_cancel_all_works(chip);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d\n", chip->last_soc);
+
+ mutex_lock(&chip->ima_recovery_lock);
+ /* Acquire IMA access forcibly from FG ALG */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1);
+ if (rc) {
+ pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Release the IMA access now so that FG reset can go through */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1);
+ if (rc) {
+ pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("resetting FG\n");
+
+ /* Assert FG reset */
+ rc = fg_reset(chip, true);
+ if (rc) {
+ pr_err("Couldn't reset FG\n");
+ goto out;
+ }
+
+ /* Wait for a small time before deasserting FG reset */
+ msleep(100);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("clearing FG from reset\n");
+
+ /* Deassert FG reset */
+ rc = fg_reset(chip, false);
+ if (rc) {
+ pr_err("Couldn't clear FG reset\n");
+ goto out;
+ }
+
+ /* Wait for at least a FG cycle before doing SRAM access */
+ msleep(2000);
+
+ chip->block_sram_access = false;
+
+ if (!chip->init_done) {
+ schedule_work(&chip->init_work);
+ goto wait;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Calling hw_init\n");
+
+ /*
+ * Once FG is reset, everything in SRAM will be wiped out. Redo
+ * hw_init, update jeita settings etc., again to make sure all
+ * the settings got restored again.
+ */
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("Error in hw_init, rc=%d\n", rc);
+ goto out;
+ }
+
+ update_jeita_setting(&chip->update_jeita_setting.work);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ rc = fg_init_iadc_config(chip);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("loading battery profile\n");
+ if (!chip->use_otp_profile) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->soc_reporting_ready = false;
+ chip->batt_type = default_batt_type;
+ fg_handle_battery_insertion(chip);
+ }
+
+wait:
+ rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done,
+ msecs_to_jiffies(FG_RESTART_TIMEOUT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (rc == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (rc <= 0) {
+ pr_err("fg_restart taking long time rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address,
+ fg_data[FG_DATA_VINT_ERR].len,
+ fg_data[FG_DATA_VINT_ERR].offset, 0);
+ if (rc < 0)
+ pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IMA error recovery done...\n");
+out:
+ fg_restore_soc(chip);
+ fg_restore_cc_soc(chip);
+ fg_enable_irqs(chip, true);
+ update_sram_data_work(&chip->update_sram_data.work);
+ update_temp_data(&chip->update_temp_work.work);
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+ chip->ima_error_handling = false;
+ mutex_unlock(&chip->ima_recovery_lock);
+ fg_relax(&chip->fg_reset_wakeup_source);
+}
+
#define DIG_MINOR 0x0
#define DIG_MAJOR 0x1
#define ANA_MINOR 0x2
#define ANA_MAJOR 0x3
#define IACS_INTR_SRC_SLCT BIT(3)
-static int fg_setup_memif_offset(struct fg_chip *chip)
+static int fg_memif_init(struct fg_chip *chip)
{
int rc;
@@ -6464,7 +8530,7 @@
break;
default:
pr_err("Digital Major rev=%d not supported\n",
- chip->revision[DIG_MAJOR]);
+ chip->revision[DIG_MAJOR]);
return -EINVAL;
}
@@ -6481,6 +8547,13 @@
pr_err("failed to configure interrupt source %d\n", rc);
return rc;
}
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip, true);
+ if (rc) {
+ pr_err("Error in clearing IMA exception rc=%d", rc);
+ return rc;
+ }
}
return 0;
@@ -6515,6 +8588,7 @@
case PMI8950:
case PMI8937:
case PMI8996:
+ case PMI8940:
chip->pmic_subtype = pmic_rev_id->pmic_subtype;
chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
@@ -6531,10 +8605,8 @@
}
#define INIT_JEITA_DELAY_MS 1000
-
static void delayed_init_work(struct work_struct *work)
{
- u8 reg[2];
int rc;
struct fg_chip *chip = container_of(work,
struct fg_chip,
@@ -6546,6 +8618,14 @@
rc = fg_hw_init(chip);
if (rc) {
pr_err("failed to hw init rc = %d\n", rc);
+ if (!chip->init_done && chip->ima_supported) {
+ rc = fg_check_alg_status(chip);
+ if (rc && rc != -EBUSY)
+ pr_err("Couldn't check FG ALG status, rc=%d\n",
+ rc);
+ fg_mem_release(chip);
+ return;
+ }
fg_mem_release(chip);
fg_cleanup(chip);
return;
@@ -6566,57 +8646,19 @@
if (!chip->use_otp_profile)
schedule_delayed_work(&chip->batt_profile_init, 0);
+ if (chip->ima_supported && fg_reset_on_lockup)
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+
if (chip->wa_flag & IADC_GAIN_COMP_WA) {
- /* read default gain config */
- rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to read default gain rc=%d\n", rc);
+ rc = fg_init_iadc_config(chip);
+ if (rc)
goto done;
- }
-
- if (reg[1] || reg[0]) {
- /*
- * Default gain register has valid value:
- * - write to gain register.
- */
- rc = fg_mem_write(chip, reg, GAIN_REG, 2,
- GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to write gain rc=%d\n", rc);
- goto done;
- }
- } else {
- /*
- * Default gain register is invalid:
- * - read gain register for default gain value
- * - write to default gain register.
- */
- rc = fg_mem_read(chip, reg, GAIN_REG, 2,
- GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to read gain rc=%d\n", rc);
- goto done;
- }
- rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
- DEF_GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to write default gain rc=%d\n",
- rc);
- goto done;
- }
- }
-
- chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
- chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
- chip->iadc_comp_data.dfl_gain = half_float(reg);
- chip->input_present = is_input_present(chip);
- chip->otg_present = is_otg_present(chip);
- chip->init_done = true;
-
- pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
- reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
}
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
pr_debug("FG: HW_init success\n");
return;
@@ -6675,16 +8717,30 @@
"qpnp_fg_cap_learning");
wakeup_source_init(&chip->esr_extract_wakeup_source.source,
"qpnp_fg_esr_extract");
+ wakeup_source_init(&chip->slope_limit_wakeup_source.source,
+ "qpnp_fg_slope_limit");
+ wakeup_source_init(&chip->dischg_gain_wakeup_source.source,
+ "qpnp_fg_dischg_gain");
+ wakeup_source_init(&chip->fg_reset_wakeup_source.source,
+ "qpnp_fg_reset");
+ wakeup_source_init(&chip->cc_soc_wakeup_source.source,
+ "qpnp_fg_cc_soc");
+ wakeup_source_init(&chip->sanity_wakeup_source.source,
+ "qpnp_fg_sanity_check");
+ spin_lock_init(&chip->sec_access_lock);
mutex_init(&chip->rw_lock);
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->learning_data.learning_lock);
mutex_init(&chip->rslow_comp.lock);
mutex_init(&chip->sysfs_restart_lock);
+ mutex_init(&chip->ima_recovery_lock);
INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work);
+ INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work);
INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
INIT_WORK(&chip->dump_sram, dump_sram);
@@ -6699,13 +8755,19 @@
INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+ INIT_WORK(&chip->slope_limiter_work, slope_limiter_work);
+ INIT_WORK(&chip->dischg_gain_work, discharge_gain_work);
+ INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work);
alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
fg_cap_learning_alarm_cb);
+ alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME,
+ fg_hard_jeita_alarm_cb);
init_completion(&chip->sram_access_granted);
init_completion(&chip->sram_access_revoked);
complete_all(&chip->sram_access_revoked);
init_completion(&chip->batt_id_avail);
init_completion(&chip->first_soc_done);
+ init_completion(&chip->fg_reset_done);
dev_set_drvdata(&pdev->dev, chip);
if (of_get_available_child_count(pdev->dev.of_node) == 0) {
@@ -6763,7 +8825,7 @@
return rc;
}
- rc = fg_setup_memif_offset(chip);
+ rc = fg_memif_init(chip);
if (rc) {
pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
goto of_init_fail;
@@ -6834,10 +8896,18 @@
rc = fg_dfs_create(chip);
if (rc < 0) {
pr_err("failed to create debugfs rc = %d\n", rc);
- goto cancel_work;
+ goto power_supply_unregister;
}
}
+ /* Fake temperature till the actual temperature is read */
+ chip->last_good_temp = 250;
+
+ /* Initialize batt_info variables */
+ chip->batt_range_ocv = &fg_batt_valid_ocv;
+ chip->batt_range_pct = &fg_batt_range_pct;
+ memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info));
+
schedule_work(&chip->init_work);
pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
@@ -6847,32 +8917,17 @@
return rc;
+power_supply_unregister:
+ power_supply_unregister(chip->bms_psy);
cancel_work:
- cancel_delayed_work_sync(&chip->update_jeita_setting);
- cancel_delayed_work_sync(&chip->update_sram_data);
- cancel_delayed_work_sync(&chip->update_temp_work);
- cancel_delayed_work_sync(&chip->check_empty_work);
- cancel_delayed_work_sync(&chip->batt_profile_init);
- alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
- cancel_work_sync(&chip->set_resume_soc_work);
- cancel_work_sync(&chip->fg_cap_learning_work);
- cancel_work_sync(&chip->dump_sram);
- cancel_work_sync(&chip->status_change_work);
- cancel_work_sync(&chip->cycle_count_work);
- cancel_work_sync(&chip->update_esr_work);
- cancel_work_sync(&chip->rslow_comp_work);
- cancel_work_sync(&chip->sysfs_restart_work);
- cancel_work_sync(&chip->gain_comp_work);
- cancel_work_sync(&chip->init_work);
- cancel_work_sync(&chip->charge_full_work);
- cancel_work_sync(&chip->bcl_hi_power_work);
- cancel_work_sync(&chip->esr_extract_config_work);
+ fg_cancel_all_works(chip);
of_init_fail:
mutex_destroy(&chip->rslow_comp.lock);
mutex_destroy(&chip->rw_lock);
mutex_destroy(&chip->cyc_ctr.lock);
mutex_destroy(&chip->learning_data.learning_lock);
mutex_destroy(&chip->sysfs_restart_lock);
+ mutex_destroy(&chip->ima_recovery_lock);
wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
wakeup_source_trash(&chip->empty_check_wakeup_source.source);
wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -6882,6 +8937,11 @@
wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+ wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+ wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+ wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->sanity_wakeup_source.source);
return rc;
}
@@ -6938,11 +8998,103 @@
return 0;
}
+static void fg_check_ima_idle(struct fg_chip *chip)
+{
+ bool rif_mem_sts = true;
+ int rc, time_count = 0;
+
+ mutex_lock(&chip->rw_lock);
+ /* Make sure IMA is idle */
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ break;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (time_count > 4) {
+ pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
+ fg_run_iacs_clear_sequence(chip);
+ break;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_shutdown(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_emerg("FG shutdown started\n");
+ fg_cancel_all_works(chip);
+ fg_check_ima_idle(chip);
+ chip->fg_shutdown = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_emerg("FG shutdown complete\n");
+}
+
static const struct dev_pm_ops qpnp_fg_pm_ops = {
.suspend = fg_suspend,
.resume = fg_resume,
};
+static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_val = fg_reset_on_lockup;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_reset_on_lockup: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) {
+ pr_err("Bad value %d\n", fg_reset_on_lockup);
+ fg_reset_on_lockup = old_val;
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ if (!chip->ima_supported) {
+ pr_err("Cannot set this for non-IMA supported FG\n");
+ fg_reset_on_lockup = old_val;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup);
+
+ if (fg_reset_on_lockup)
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+ else
+ cancel_delayed_work_sync(&chip->check_sanity_work);
+
+ return rc;
+}
+
+static struct kernel_param_ops fg_reset_ops = {
+ .set = fg_reset_lockup_set,
+ .get = param_get_int,
+};
+
+module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644);
+
static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
{
int rc;
@@ -7025,6 +9177,7 @@
},
.probe = fg_probe,
.remove = fg_remove,
+ .shutdown = fg_shutdown,
};
static int __init fg_init(void)
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 4fd659e..65a74c7 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -244,7 +244,7 @@
chg->use_extcon = true;
chg->name = "pmi632_charger";
/* PMI632 does not support PD */
- __pd_disabled = 1;
+ chg->pd_not_supported = true;
chg->hw_max_icl_ua =
(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
: PMI632_MAX_ICL_UA;
@@ -1491,14 +1491,18 @@
return rc;
}
- /* configure VCONN for software control */
- rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+ /* Keep VCONN in h/w controlled mode for PMI632 */
+ if (chg->smb_version != PMI632_SUBTYPE) {
+ /* configure VCONN for software control */
+ rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
VCONN_EN_SRC_BIT);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure VCONN for SW control rc=%d\n", rc);
- return rc;
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure VCONN for SW control rc=%d\n",
+ rc);
+ return rc;
+ }
}
return rc;
@@ -1594,15 +1598,31 @@
/*
* PMI632 based hw init:
+ * - Enable STAT pin function on SMB_EN
* - Rerun APSD to ensure proper charger detection if device
* boots with charger connected.
* - Initialize flash module for PMI632
*/
if (chg->smb_version == PMI632_SUBTYPE) {
+ rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+ EN_STAT_CMD_BIT, EN_STAT_CMD_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure SMB_EN rc=%d\n",
+ rc);
+ return rc;
+ }
+
schgm_flash_init(chg);
smblib_rerun_apsd_if_required(chg);
}
+ /* clear the ICL override if it is set */
+ rc = smblib_icl_override(chg, false);
+ if (rc < 0) {
+ pr_err("Couldn't disable ICL override rc=%d\n", rc);
+ return rc;
+ }
+
/* vote 0mA on usb_icl for non battery platforms */
vote(chg->usb_icl_votable,
DEFAULT_VOTER, chip->dt.no_battery, 0);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 39005f6..86f6638 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -3445,6 +3445,9 @@
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
struct storm_watch *wdata;
+ const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+ int rc;
+ u8 stat = 0, max_pulses = 0;
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
@@ -3452,6 +3455,46 @@
wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
reset_storm_count(wdata);
+
+ if (!chg->non_compliant_chg_detected &&
+ apsd->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't read CHANGE_STATUS_REG rc=%d\n", rc);
+
+ if (stat & QC_5V_BIT)
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, HVDCP_PULSE_COUNT_MAX_REG, &max_pulses);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't read QC2 max pulses rc=%d\n", rc);
+
+ chg->non_compliant_chg_detected = true;
+ chg->qc2_max_pulses = (max_pulses &
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK);
+
+ if (stat & QC_12V_BIT) {
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ HVDCP_PULSE_COUNT_MAX_QC2_9V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't force max pulses to 9V rc=%d\n",
+ rc);
+
+ } else if (stat & QC_9V_BIT) {
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ HVDCP_PULSE_COUNT_MAX_QC2_5V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't force max pulses to 5V rc=%d\n",
+ rc);
+
+ }
+ smblib_rerun_apsd(chg);
+ }
+
return IRQ_HANDLED;
}
@@ -4270,6 +4313,17 @@
if (rc < 0)
smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
+ /* if non-compliant charger caused UV, restore original max pulses */
+ if (chg->non_compliant_chg_detected) {
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ chg->qc2_max_pulses);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore max pulses rc=%d\n",
+ rc);
+ chg->non_compliant_chg_detected = false;
+ }
+
/* enable APSD CC trigger for next insertion */
rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index dc8cbc7..3129861 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -361,6 +361,8 @@
bool try_sink_active;
int boost_current_ua;
int temp_speed_reading_count;
+ int qc2_max_pulses;
+ bool non_compliant_chg_detected;
bool fake_usb_insertion;
/* extcon for VBUS / ID notification to USB for uUSB */
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d40d6fd..449d974 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -587,6 +587,15 @@
#define EN_LEGACY_CABLE_DETECTION_BIT BIT(1)
#define ALLOW_PD_DRING_UFP_TCCDB_BIT BIT(0)
+#define HVDCP_PULSE_COUNT_MAX_REG (USBIN_BASE + 0x5B)
+#define HVDCP_PULSE_COUNT_MAX_QC2_MASK GENMASK(7, 6)
+enum {
+ HVDCP_PULSE_COUNT_MAX_QC2_5V,
+ HVDCP_PULSE_COUNT_MAX_QC2_9V,
+ HVDCP_PULSE_COUNT_MAX_QC2_12V,
+ HVDCP_PULSE_COUNT_MAX_QC2_INVALID
+};
+
#define USBIN_ADAPTER_ALLOW_CFG_REG (USBIN_BASE + 0x60)
#define USBIN_ADAPTER_ALLOW_MASK GENMASK(3, 0)
enum {
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index f1df8f0..86ecda5 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -1371,10 +1371,12 @@
[USBIN_PLUGIN_IRQ] = {
.name = "usbin-plugin",
.handler = smblib_handle_usb_plugin,
+ .wake = true,
},
[USBIN_SRC_CHANGE_IRQ] = {
.name = "usbin-src-change",
.handler = smblib_handle_usb_source_change,
+ .wake = true,
},
[USBIN_ICL_CHANGE_IRQ] = {
.name = "usbin-icl-change",
@@ -1383,6 +1385,7 @@
[TYPE_C_CHANGE_IRQ] = {
.name = "type-c-change",
.handler = smblib_handle_usb_typec_change,
+ .wake = true,
},
/* DC INPUT IRQs */
[DCIN_COLLAPSE_IRQ] = {
@@ -1825,6 +1828,8 @@
goto cleanup;
}
+ device_init_wakeup(chip->chg.dev, true);
+
pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
return rc;
diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c
index 55a1f45..91d215e 100644
--- a/drivers/power/supply/qcom/smb1390-charger.c
+++ b/drivers/power/supply/qcom/smb1390-charger.c
@@ -100,6 +100,7 @@
struct regmap *regmap;
struct notifier_block nb;
struct class cp_class;
+ struct wakeup_source *cp_ws;
/* work structs */
struct work_struct status_change_work;
@@ -114,6 +115,7 @@
struct votable *pl_disable_votable;
struct votable *fcc_votable;
struct votable *hvdcp_hw_inov_dis_votable;
+ struct votable *cp_awake_votable;
/* power supplies */
struct power_supply *usb_psy;
@@ -378,11 +380,12 @@
if (rc < 0)
return rc;
- vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
vote(chip->pl_disable_votable, CP_VOTER, false, 0);
+ vote(chip->cp_awake_votable, CP_VOTER, false, 0);
} else {
vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, true, 0);
vote(chip->pl_disable_votable, CP_VOTER, true, 0);
+ vote(chip->cp_awake_votable, CP_VOTER, true, 0);
rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
if (rc < 0)
@@ -428,6 +431,20 @@
return rc;
}
+static int smb1390_awake_vote_cb(struct votable *votable, void *data,
+ int awake, const char *client)
+{
+ struct smb1390 *chip = data;
+
+ if (awake)
+ __pm_stay_awake(chip->cp_ws);
+ else
+ __pm_relax(chip->cp_ws);
+
+ pr_debug("client: %s awake: %d\n", client, awake);
+ return 0;
+}
+
static int smb1390_notifier_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -589,6 +606,11 @@
if (IS_ERR(chip->ilim_votable))
return PTR_ERR(chip->ilim_votable);
+ chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY,
+ smb1390_awake_vote_cb, chip);
+ if (IS_ERR(chip->cp_awake_votable))
+ return PTR_ERR(chip->cp_awake_votable);
+
return 0;
}
@@ -722,16 +744,21 @@
rc = smb1390_parse_dt(chip);
if (rc < 0) {
pr_err("Couldn't parse device tree rc=%d\n", rc);
- goto out_work;
+ return rc;
}
chip->vadc_dev = qpnp_get_vadc(chip->dev, "smb");
if (IS_ERR(chip->vadc_dev)) {
rc = PTR_ERR(chip->vadc_dev);
- pr_err("Couldn't get vadc dev rc=%d\n", rc);
- goto out_work;
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't get vadc dev rc=%d\n", rc);
+ return rc;
}
+ chip->cp_ws = wakeup_source_register("qcom-chargepump");
+ if (!chip->cp_ws)
+ return rc;
+
rc = smb1390_create_votables(chip);
if (rc < 0) {
pr_err("Couldn't create votables rc=%d\n", rc);
@@ -778,6 +805,7 @@
out_work:
cancel_work(&chip->taper_work);
cancel_work(&chip->status_change_work);
+ wakeup_source_unregister(chip->cp_ws);
return rc;
}
@@ -790,8 +818,10 @@
/* explicitly disable charging */
vote(chip->disable_votable, USER_VOTER, true, 0);
+ vote(chip->hvdcp_hw_inov_dis_votable, CP_VOTER, false, 0);
cancel_work(&chip->taper_work);
cancel_work(&chip->status_change_work);
+ wakeup_source_unregister(chip->cp_ws);
smb1390_destroy_votables(chip);
return 0;
}
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index a5af817..9f39561 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -116,6 +116,19 @@
return 0;
}
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+ ICL_OVERRIDE_AFTER_APSD_BIT,
+ override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+
+ return rc;
+}
+
int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override)
{
int rc = 0;
@@ -517,7 +530,7 @@
int rc;
u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT;
- if (chg->pd_disabled)
+ if (chg->pd_not_supported)
return 0;
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask,
@@ -871,7 +884,7 @@
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
{
int rc = 0;
- bool hc_mode = false;
+ bool hc_mode = false, override = false;
/* suspend and return if 25mA or less is requested */
if (icl_ua <= USBIN_25MA)
@@ -897,6 +910,13 @@
goto out;
}
hc_mode = true;
+
+ /*
+ * Micro USB mode follows ICL register independent of override
+ * bit, configure override only for typeC mode.
+ */
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
+ override = true;
}
set_mode:
@@ -907,6 +927,12 @@
goto out;
}
+ rc = smblib_icl_override(chg, override);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+ goto out;
+ }
+
/* unsuspend after configuring current and override */
rc = smblib_set_usb_suspend(chg, false);
if (rc < 0) {
@@ -3078,7 +3104,8 @@
}
if (!chg->pr_swap_in_progress)
- chg->ok_to_pd = !(*chg->pd_disabled) || chg->early_usb_attach;
+ chg->ok_to_pd = (!(*chg->pd_disabled) || chg->early_usb_attach)
+ && !chg->pd_not_supported;
}
static void typec_src_insertion(struct smb_charger *chg)
@@ -3097,8 +3124,8 @@
}
chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
- chg->ok_to_pd = !(chg->typec_legacy || *chg->pd_disabled)
- || chg->early_usb_attach;
+ chg->ok_to_pd = (!(chg->typec_legacy || *chg->pd_disabled)
+ || chg->early_usb_attach) && !chg->pd_not_supported;
if (!chg->ok_to_pd) {
rc = smblib_configure_hvdcp_apsd(chg, true);
if (rc < 0) {
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 14eba8c..7c02468 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -272,6 +272,7 @@
int otg_delay_ms;
int *weak_chg_icl_ua;
struct qpnp_vadc_chip *vadc_dev;
+ bool pd_not_supported;
/* locks */
struct mutex lock;
@@ -539,6 +540,7 @@
int smblib_configure_wdog(struct smb_charger *chg, bool enable);
int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
+int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 57a3ee0..3ccc858 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2044,6 +2044,8 @@
if (!shost->use_clustering)
q->limits.cluster = 0;
+ if (shost->inlinecrypt_support)
+ queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
* which is a common minimum for HBAs, and the minimum DMA alignment,
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index b1c86d4..d4fe6ee 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -172,17 +172,15 @@
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
unsigned long flags;
- struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
- struct request *req_pending = NULL;
if (!qcom_host->ice.vops->config_start)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
- req_pending = qcom_host->req_pending;
- if (!req_pending) {
+ if (!qcom_host->req_pending) {
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
}
@@ -191,24 +189,15 @@
/*
* config_start is called again as previous attempt returned -EAGAIN,
* this call shall now take care of the necessary key setup.
- * 'ice_set' will not actually be used, instead the next call to
- * config_start() for this request, in the normal call flow, will
- * succeed as the key has now been setup.
*/
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
- qcom_host->req_pending, &ice_set, false);
+ qcom_host->req_pending, NULL, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
- /*
- * Resume with requests processing. We assume config_start has been
- * successful, but even if it wasn't we still must resume in order to
- * allow for the request to be retried.
- */
- ufshcd_scsi_unblock_requests(qcom_host->hba);
-
}
/**
@@ -294,18 +283,14 @@
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!queue_work(ice_workqueue,
@@ -316,10 +301,9 @@
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
@@ -418,9 +402,7 @@
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
@@ -428,9 +410,8 @@
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
+
qcom_host->req_pending = cmd->request;
if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
@@ -440,10 +421,9 @@
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 39ab28a..8d867a2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -897,11 +897,18 @@
req = lrbp->cmd->request;
else
return 0;
-
- /* Use request LBA as the DUN value */
- if (req->bio)
- *dun = (req->bio->bi_iter.bi_sector) >>
- UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+ /*
+ * Right now ICE do not support variable dun but can be
+ * taken as future enhancement
+ * if (bio_dun(req->bio)) {
+ * dun @bio can be split, so we have to adjust offset
+ * *dun = bio_dun(req->bio);
+ * } else
+ */
+ if (req->bio) {
+ *dun = req->bio->bi_iter.bi_sector;
+ *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+ }
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
@@ -2133,6 +2140,8 @@
dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
__func__, err);
goto out_host_free;
+ } else {
+ hba->host->inlinecrypt_support = 1;
}
host->generic_phy = devm_phy_get(dev, "ufsphy");
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a03ecb0..27f6a05 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@
struct work_struct ice_cfg_work;
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
+ bool work_pending;
};
static inline u32
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5fafaca..3ecca59 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -684,6 +684,17 @@
determines last CPU to call into PSCI for cluster Low power
modes.
+config MSM_PM_LEGACY
+ depends on PM
+ select MSM_IDLE_STATS if DEBUG_FS
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Qualcomm platform specific Legacy PM driver"
+ help
+ Platform specific legacy power driver to manage
+ cores and l2 low power modes. It interfaces with
+ various system drivers and puts the cores into
+ low power modes.
+
config MSM_NOPM
default y if !PM
bool
@@ -700,7 +711,7 @@
trusted apps, unloading them and marshalling buffers to the
trusted fingerprint app.
-if MSM_PM
+if (MSM_PM || MSM_PM_LEGACY)
menuconfig MSM_IDLE_STATS
bool "Collect idle statistics"
help
@@ -733,7 +744,7 @@
histogram. This is for collecting statistics on suspend.
endif # MSM_IDLE_STATS
-endif # MSM_PM
+endif # MSM_PM || MSM_PM_LEGACY
config QCOM_DCC_V2
bool "Qualcomm Technologies Data Capture and Compare engine support for V2"
@@ -868,3 +879,11 @@
interrupt event and event data.
source "drivers/soc/qcom/wcnss/Kconfig"
+
+config BIG_CLUSTER_MIN_FREQ_ADJUST
+ bool "Adjust BIG cluster min frequency based on power collapse state"
+ default n
+ help
+ This driver is used to set the floor of the min frequency of big cluster
+ to the user specified value when the cluster is not power collapsed. When
+ the cluster is power collapsed it resets the value to physical limits.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index b83f554..0b71121 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -14,6 +14,7 @@
obj-$(CONFIG_QCOM_SMD) += smd.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_MSM_PM_LEGACY) += pm-boot.o msm-pm.o
obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
@@ -105,3 +106,4 @@
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
obj-$(CONFIG_WCNSS_CORE) += wcnss/
+obj-$(CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST) += big_cluster_min_freq_adjust.o
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index fdfd7b7..02c6deb 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -33,7 +33,7 @@
#define BGRSB_GLINK_INTENT_SIZE 0x04
#define BGRSB_MSG_SIZE 0x08
-#define TIMEOUT_MS 500
+#define TIMEOUT_MS 2000
#define BGRSB_LDO15_VTG_MIN_UV 3300000
#define BGRSB_LDO15_VTG_MAX_UV 3300000
@@ -544,7 +544,7 @@
rc = wait_event_timeout(dev->link_state_wait,
(dev->chnl_state == true),
- msecs_to_jiffies(TIMEOUT_MS*2));
+ msecs_to_jiffies(TIMEOUT_MS));
if (rc == 0) {
pr_err("Glink channel connection time out\n");
return;
@@ -574,7 +574,7 @@
rc = wait_event_timeout(dev->link_state_wait,
(dev->chnl_state == true),
- msecs_to_jiffies(TIMEOUT_MS*2));
+ msecs_to_jiffies(TIMEOUT_MS));
if (rc == 0) {
pr_err("Glink channel connection time out\n");
return;
@@ -968,9 +968,9 @@
return 0;
}
-static int bg_rsb_resume(struct platform_device *pdev)
+static int bg_rsb_resume(struct device *pldev)
{
- int rc;
+ struct platform_device *pdev = to_platform_device(pldev);
struct bgrsb_priv *dev = platform_get_drvdata(pdev);
if (dev->bgrsb_current_state == BGRSB_STATE_RSB_CONFIGURED)
@@ -978,12 +978,6 @@
if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
- rc = bgrsb_configr_rsb(dev, true);
- if (rc != 0) {
- pr_err("BG failed to configure RSB %d\n", rc);
- bgrsb_ldo_work(dev, BGRSB_DISABLE_LDO11);
- return rc;
- }
dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
pr_debug("RSB Cofigured\n");
return 0;
@@ -993,8 +987,9 @@
return -EINVAL;
}
-static int bg_rsb_suspend(struct platform_device *pdev, pm_message_t state)
+static int bg_rsb_suspend(struct device *pldev)
{
+ struct platform_device *pdev = to_platform_device(pldev);
struct bgrsb_priv *dev = platform_get_drvdata(pdev);
if (dev->bgrsb_current_state == BGRSB_STATE_INIT)
@@ -1021,15 +1016,19 @@
{ }
};
+static const struct dev_pm_ops pm_rsb = {
+ .resume = bg_rsb_resume,
+ .suspend = bg_rsb_suspend,
+};
+
static struct platform_driver bg_rsb_driver = {
.driver = {
.name = "bg-rsb",
.of_match_table = bg_rsb_of_match,
+ .pm = &pm_rsb,
},
.probe = bg_rsb_probe,
.remove = bg_rsb_remove,
- .resume = bg_rsb_resume,
- .suspend = bg_rsb_suspend,
};
module_platform_driver(bg_rsb_driver);
diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c
index efef26d..1cde8c6 100644
--- a/drivers/soc/qcom/bgcom_interface.c
+++ b/drivers/soc/qcom/bgcom_interface.c
@@ -43,7 +43,8 @@
#define BGDAEMON_LDO03_LPM_VTG 0
#define BGDAEMON_LDO03_NPM_VTG 10000
-#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 100
+#define MPPS_DOWN_EVENT_TO_BG_TIMEOUT 3000
+#define SLEEP_FOR_SPI_BUS 2000
enum {
SSR_DOMAIN_BG,
@@ -93,6 +94,7 @@
static dev_t bg_dev;
static int device_open;
static void *handle;
+static bool twm_exit;
static struct bgcom_open_config_type config_type;
static DECLARE_COMPLETION(bg_modem_down_wait);
@@ -353,6 +355,8 @@
break;
case SET_SPI_BUSY:
ret = bgcom_set_spi_state(BGCOM_SPI_BUSY);
+ /* Add sleep for SPI bus to release */
+ msleep(SLEEP_FOR_SPI_BUS);
break;
case BG_SOFT_RESET:
ret = bg_soft_reset();
@@ -360,6 +364,10 @@
case BG_MODEM_DOWN2_BG_DONE:
ret = modem_down2_bg();
break;
+ case BG_TWM_EXIT:
+ twm_exit = true;
+ ret = 0;
+ break;
default:
ret = -ENOIOCTLCMD;
}
@@ -515,6 +523,10 @@
bgcom_set_spi_state(BGCOM_SPI_BUSY);
send_uevent(&bge);
break;
+ case SUBSYS_AFTER_SHUTDOWN:
+ /* Add sleep for SPI bus to release */
+ msleep(SLEEP_FOR_SPI_BUS);
+ break;
case SUBSYS_AFTER_POWERUP:
bge.e_type = BG_AFTER_POWER_UP;
bgdaemon_ldowork(DISABLE_LDO03);
@@ -555,6 +567,16 @@
return NOTIFY_DONE;
}
+bool is_twm_exit(void)
+{
+ if (twm_exit) {
+ twm_exit = false;
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(is_twm_exit);
+
static struct notifier_block ssr_modem_nb = {
.notifier_call = ssr_modem_cb,
.priority = 0,
diff --git a/drivers/soc/qcom/bgcom_interface.h b/drivers/soc/qcom/bgcom_interface.h
index 500ca6d..235995e 100644
--- a/drivers/soc/qcom/bgcom_interface.h
+++ b/drivers/soc/qcom/bgcom_interface.h
@@ -13,10 +13,17 @@
#ifndef BGCOM_INTERFACE_H
#define BGCOM_INTERFACE_H
-/**
+/*
* bg_soft_reset() - soft reset Blackghost
* Return 0 on success or -Ve on error
*/
int bg_soft_reset(void);
+/*
+ * is_twm_exit()
+ * Return true if device is booting up on TWM exit.
+ * value is auto cleared once read.
+ */
+bool is_twm_exit(void);
+
#endif /* BGCOM_INTERFACE_H */
diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c
index b8e3b84..d2dc05f 100644
--- a/drivers/soc/qcom/bgcom_spi.c
+++ b/drivers/soc/qcom/bgcom_spi.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
#include "bgcom.h"
#include "bgrsb.h"
#include "bgcom_interface.h"
@@ -43,12 +44,11 @@
#define BG_SPI_MAX_WORDS (0x3FFFFFFD)
#define BG_SPI_MAX_REGS (0x0A)
-#define SLEEP_IN_STATE_CHNG 2000
#define HED_EVENT_ID_LEN (0x02)
#define HED_EVENT_SIZE_LEN (0x02)
#define HED_EVENT_DATA_STRT_LEN (0x05)
-#define MAX_RETRY 200
+#define MAX_RETRY 500
enum bgcom_state {
/*BGCOM Staus ready*/
@@ -63,6 +63,7 @@
BGCOM_READ_REG = 0,
BGCOM_READ_FIFO = 1,
BGCOM_READ_AHB = 2,
+ BGCOM_WRITE_REG = 3,
};
struct bg_spi_priv {
@@ -112,6 +113,17 @@
static struct mutex bg_resume_mutex;
+static atomic_t bg_is_spi_active;
+static int bg_irq;
+
+static struct spi_device *get_spi_device(void)
+{
+ struct bg_spi_priv *bg_spi = container_of(bg_com_drv,
+ struct bg_spi_priv, lhandle);
+ struct spi_device *spi = bg_spi->spi;
+ return spi;
+}
+
static void augmnt_fifo(uint8_t *data, int pos)
{
data[pos] = '\0';
@@ -149,8 +161,6 @@
mutex_lock(&bg_spi->xfer_mutex);
spi_state = state;
- if (spi_state == BGCOM_SPI_BUSY)
- msleep(SLEEP_IN_STATE_CHNG);
mutex_unlock(&bg_spi->xfer_mutex);
return 0;
}
@@ -197,6 +207,10 @@
case BGCOM_READ_FIFO:
ret = bgcom_fifo_read(&clnt_handle, no_of_words, buf);
break;
+ case BGCOM_WRITE_REG:
+ ret = bgcom_reg_write(&clnt_handle, BG_CMND_REG,
+ no_of_words, buf);
+ break;
case BGCOM_READ_AHB:
break;
}
@@ -232,6 +246,9 @@
tx_xfer = &bg_spi->xfer1;
spi = bg_spi->spi;
+ if (!atomic_read(&bg_is_spi_active))
+ return -ECANCELED;
+
mutex_lock(&bg_spi->xfer_mutex);
bg_spi_reinit_xfer(tx_xfer);
tx_xfer->tx_buf = tx_buf;
@@ -443,6 +460,7 @@
int bgcom_ahb_read(void *handle, uint32_t ahb_start_addr,
uint32_t num_words, void *read_buf)
{
+ dma_addr_t dma_hndl_tx, dma_hndl_rx;
uint32_t txn_len;
uint8_t *tx_buf;
uint8_t *rx_buf;
@@ -450,6 +468,7 @@
int ret;
uint8_t cmnd = 0;
uint32_t ahb_addr = 0;
+ struct spi_device *spi = get_spi_device();
if (!handle || !read_buf || num_words == 0
|| num_words > BG_SPI_MAX_WORDS) {
@@ -472,15 +491,16 @@
size = num_words*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_AHB_READ_CMD_LEN + size;
- tx_buf = kzalloc(txn_len, GFP_KERNEL);
+ tx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+ &dma_hndl_tx, GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
- rx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+ rx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
+ &dma_hndl_rx, GFP_KERNEL);
if (!rx_buf) {
- kfree(tx_buf);
+ dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
return -ENOMEM;
}
@@ -495,8 +515,8 @@
if (!ret)
memcpy(read_buf, rx_buf+BG_SPI_AHB_READ_CMD_LEN, size);
- kfree(tx_buf);
- kfree(rx_buf);
+ dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
+ dma_free_coherent(&spi->dev, txn_len, rx_buf, dma_hndl_rx);
return ret;
}
EXPORT_SYMBOL(bgcom_ahb_read);
@@ -504,12 +524,14 @@
int bgcom_ahb_write(void *handle, uint32_t ahb_start_addr,
uint32_t num_words, void *write_buf)
{
+ dma_addr_t dma_hndl;
uint32_t txn_len;
uint8_t *tx_buf;
uint32_t size;
int ret;
uint8_t cmnd = 0;
uint32_t ahb_addr = 0;
+ struct spi_device *spi = get_spi_device();
if (!handle || !write_buf || num_words == 0
|| num_words > BG_SPI_MAX_WORDS) {
@@ -532,9 +554,7 @@
size = num_words*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_AHB_CMD_LEN + size;
-
- tx_buf = kzalloc(txn_len, GFP_KERNEL);
-
+ tx_buf = dma_zalloc_coherent(&spi->dev, txn_len, &dma_hndl, GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
@@ -546,7 +566,7 @@
memcpy(tx_buf+BG_SPI_AHB_CMD_LEN, write_buf, size);
ret = bgcom_transfer(handle, tx_buf, NULL, txn_len);
- kfree(tx_buf);
+ dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl);
return ret;
}
EXPORT_SYMBOL(bgcom_ahb_write);
@@ -621,6 +641,11 @@
return -EBUSY;
}
+ if (bgcom_resume(handle)) {
+ pr_err("Failed to resume\n");
+ return -EBUSY;
+ }
+
size = num_words*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_READ_LEN + size;
tx_buf = kzalloc(txn_len, GFP_KERNEL | GFP_ATOMIC);
@@ -671,6 +696,11 @@
return -EBUSY;
}
+ if (bgcom_resume(handle)) {
+ pr_err("Failed to resume\n");
+ return -EBUSY;
+ }
+
size = num_regs*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_WRITE_CMND_LEN + size;
@@ -749,11 +779,18 @@
uint8_t rx_buf[8] = {0};
uint32_t cmnd_reg = 0;
+ if (spi_state == BGCOM_SPI_BUSY) {
+ printk_ratelimited("SPI is held by TZ\n");
+ goto ret_err;
+ }
+
txn_len = 0x08;
tx_buf[0] = 0x05;
ret = bgcom_transfer(handle, tx_buf, rx_buf, txn_len);
if (!ret)
memcpy(&cmnd_reg, rx_buf+BG_SPI_READ_LEN, 0x04);
+
+ret_err:
return cmnd_reg & BIT(31);
}
@@ -766,6 +803,9 @@
if (handle == NULL)
return -EINVAL;
+ if (!atomic_read(&bg_is_spi_active))
+ return -ECANCELED;
+
cntx = (struct bg_context *)handle;
bg_spi = cntx->bg_spi;
@@ -789,36 +829,15 @@
bg_soft_reset();
return -ETIMEDOUT;
}
- pr_info("BG retries for wake up : %d\n", retry);
return 0;
}
EXPORT_SYMBOL(bgcom_resume);
int bgcom_suspend(void *handle)
{
- struct bg_spi_priv *bg_spi;
- struct bg_context *cntx;
- uint32_t cmnd_reg = 0;
- int ret = 0;
-
- if (handle == NULL)
+ if (!handle)
return -EINVAL;
-
- cntx = (struct bg_context *)handle;
- bg_spi = cntx->bg_spi;
- mutex_lock(&bg_resume_mutex);
- if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
- goto unlock;
-
- cmnd_reg |= BIT(31);
- ret = bgcom_reg_write(handle, BG_CMND_REG, 1, &cmnd_reg);
- if (ret == 0)
- bg_spi->bg_state = BGCOM_STATE_SUSPEND;
-
-unlock:
- mutex_unlock(&bg_resume_mutex);
- pr_info("suspended with : %d\n", ret);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(bgcom_suspend);
@@ -931,7 +950,6 @@
struct bg_spi_priv *bg_spi;
struct device_node *node;
int irq_gpio = 0;
- int bg_irq = 0;
int ret;
bg_spi = devm_kzalloc(&spi->dev, sizeof(*bg_spi),
@@ -969,6 +987,8 @@
if (ret)
goto err_ret;
+ atomic_set(&bg_is_spi_active, 1);
+ dma_set_coherent_mask(&spi->dev, DMA_BIT_MASK(64));
pr_info("Bgcom Probed successfully\n");
return ret;
@@ -990,6 +1010,46 @@
return 0;
}
+static int bgcom_pm_suspend(struct device *dev)
+{
+ uint32_t cmnd_reg = 0;
+ struct spi_device *s_dev = to_spi_device(dev);
+ struct bg_spi_priv *bg_spi = spi_get_drvdata(s_dev);
+ int ret = 0;
+
+ if (bg_spi->bg_state == BGCOM_STATE_SUSPEND)
+ return 0;
+
+ cmnd_reg |= BIT(31);
+ ret = read_bg_locl(BGCOM_WRITE_REG, 1, &cmnd_reg);
+ if (ret == 0) {
+ bg_spi->bg_state = BGCOM_STATE_SUSPEND;
+ atomic_set(&bg_is_spi_active, 0);
+ }
+ pr_info("suspended with : %d\n", ret);
+ return ret;
+}
+
+static int bgcom_pm_resume(struct device *dev)
+{
+ struct bg_context clnt_handle;
+ int ret;
+ struct bg_spi_priv *spi =
+ container_of(bg_com_drv, struct bg_spi_priv, lhandle);
+
+ clnt_handle.bg_spi = spi;
+ atomic_set(&bg_is_spi_active, 1);
+ ret = bgcom_resume(&clnt_handle);
+ if (ret == 0)
+ pr_info("Bgcom resumed\n");
+ return ret;
+}
+
+static const struct dev_pm_ops bgcom_pm = {
+ .suspend = bgcom_pm_suspend,
+ .resume = bgcom_pm_resume,
+};
+
static const struct of_device_id bg_spi_of_match[] = {
{ .compatible = "qcom,bg-spi", },
{ }
@@ -1000,6 +1060,7 @@
.driver = {
.name = "bg-spi",
.of_match_table = bg_spi_of_match,
+ .pm = &bgcom_pm,
},
.probe = bg_spi_probe,
.remove = bg_spi_remove,
diff --git a/drivers/soc/qcom/big_cluster_min_freq_adjust.c b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
new file mode 100644
index 0000000..dbc89e1
--- /dev/null
+++ b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
@@ -0,0 +1,278 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "big_min_freq_adjust: " fmt
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/moduleparam.h>
+
+enum min_freq_adjust {
+ ADJUST_MIN_FLOOR, /* Set min floor to user supplied value */
+ RESET_MIN_FLOOR, /* Reset min floor cpuinfo value */
+};
+
+struct big_min_freq_adjust_data {
+ struct cpumask cluster_cpumask;
+ unsigned int min_freq_floor;
+ struct delayed_work min_freq_work;
+ unsigned long min_down_delay_jiffies;
+ enum min_freq_adjust min_freq_state;
+ enum min_freq_adjust min_freq_request;
+ spinlock_t lock;
+ bool big_min_freq_on;
+ bool is_init;
+};
+static struct big_min_freq_adjust_data big_min_freq_adjust_data;
+
+static void cpufreq_min_freq_work(struct work_struct *work)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+
+ spin_lock(&p->lock);
+ if (p->min_freq_state == p->min_freq_request) {
+ spin_unlock(&p->lock);
+ return;
+ }
+ spin_unlock(&p->lock);
+ cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+}
+
+static int cpufreq_callback(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ struct cpufreq_policy *policy = data;
+ unsigned int min_freq_floor;
+
+ if (p->big_min_freq_on == false)
+ return NOTIFY_DONE;
+
+ if (val != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
+
+ if (!cpumask_test_cpu(cpumask_first(&p->cluster_cpumask),
+ policy->related_cpus))
+ return NOTIFY_DONE;
+
+ spin_lock(&p->lock);
+ if (p->min_freq_request == ADJUST_MIN_FLOOR)
+ min_freq_floor = p->min_freq_floor;
+ else
+ min_freq_floor = policy->cpuinfo.min_freq;
+ cpufreq_verify_within_limits(policy, min_freq_floor,
+ policy->cpuinfo.max_freq);
+ p->min_freq_state = p->min_freq_request;
+ spin_unlock(&p->lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_nb = {
+ .notifier_call = cpufreq_callback
+};
+
+#define AFFINITY_LEVEL_L2 1
+static int cpu_pm_callback(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ unsigned long aff_level = (unsigned long) v;
+ unsigned long delay;
+ int cpu;
+
+ if (p->big_min_freq_on == false)
+ return NOTIFY_DONE;
+
+ if (aff_level != AFFINITY_LEVEL_L2)
+ return NOTIFY_DONE;
+
+ cpu = smp_processor_id();
+
+ if (!cpumask_test_cpu(cpu, &p->cluster_cpumask))
+ return NOTIFY_DONE;
+
+ spin_lock(&p->lock);
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ p->min_freq_request = RESET_MIN_FLOOR;
+ delay = p->min_down_delay_jiffies;
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ p->min_freq_request = ADJUST_MIN_FLOOR;
+ /* To avoid unnecessary oscillations between exit and idle */
+ delay = 1;
+ break;
+ default:
+ spin_unlock(&p->lock);
+ return NOTIFY_DONE;
+ }
+
+ cancel_delayed_work(&p->min_freq_work);
+
+ if (p->min_freq_state != p->min_freq_request)
+ schedule_delayed_work(&p->min_freq_work, delay);
+ spin_unlock(&p->lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pm_nb = {
+ .notifier_call = cpu_pm_callback
+};
+
+static unsigned long __read_mostly big_min_down_delay_ms;
+#define MIN_DOWN_DELAY_MSEC 80 /* Default big_min_down_delay in msec */
+#define POLICY_MIN 1094400 /* Default min_freq_floor in KHz */
+
+static void trigger_state_machine(void *d)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ bool *update_policy = d;
+
+ if (p->min_freq_request != ADJUST_MIN_FLOOR) {
+ p->min_freq_request = ADJUST_MIN_FLOOR;
+ *update_policy = true;
+ }
+}
+
+static int enable_big_min_freq_adjust(void)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ int ret;
+ bool update_policy = false;
+
+ if (p->big_min_freq_on == true)
+ return 0;
+
+ INIT_DEFERRABLE_WORK(&p->min_freq_work, cpufreq_min_freq_work);
+
+ cpumask_clear(&p->cluster_cpumask);
+ cpumask_set_cpu(4, &p->cluster_cpumask);
+ cpumask_set_cpu(5, &p->cluster_cpumask);
+ cpumask_set_cpu(6, &p->cluster_cpumask);
+ cpumask_set_cpu(7, &p->cluster_cpumask);
+
+ if (!big_min_down_delay_ms) {
+ big_min_down_delay_ms = MIN_DOWN_DELAY_MSEC;
+ p->min_down_delay_jiffies = msecs_to_jiffies(
+ big_min_down_delay_ms);
+ }
+ if (!p->min_freq_floor)
+ p->min_freq_floor = POLICY_MIN;
+
+ ret = cpu_pm_register_notifier(&cpu_pm_nb);
+ if (ret) {
+ pr_err("Failed to register for PM notification\n");
+ return ret;
+ }
+
+ ret = cpufreq_register_notifier(&cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+ if (ret) {
+ pr_err("Failed to register for CPUFREQ POLICY notification\n");
+ cpu_pm_unregister_notifier(&cpu_pm_nb);
+ return ret;
+ }
+
+ p->min_freq_state = RESET_MIN_FLOOR;
+ p->min_freq_request = RESET_MIN_FLOOR;
+ spin_lock_init(&p->lock);
+ p->big_min_freq_on = true;
+
+ /* If BIG cluster is active at this time and continue to be active
+ * forever, in that case min frequency of the cluster will never be
+ * set to floor value. This is to trigger the state machine and set
+ * the min freq and min_freq_state to appropriate values.
+ *
+ * Two possibilities here.
+ * 1) If cluster is idle before this, the wakeup is unnecessary but
+ * the state machine is set to proper state.
+ * 2) If cluster is active before this, the wakeup is necessary and
+ * the state machine is set to proper state.
+ */
+ smp_call_function_any(&p->cluster_cpumask,
+ trigger_state_machine, &update_policy, true);
+ if (update_policy)
+ cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+
+ pr_info("big min freq ajustment enabled\n");
+
+ return 0;
+}
+
+static bool __read_mostly big_min_freq_adjust_enabled;
+
+static int set_big_min_freq_adjust(const char *buf,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool_enable_only(buf, kp);
+ if (ret) {
+ pr_err("Unable to set big_min_freq_adjust_enabled: %d\n", ret);
+ return ret;
+ }
+
+ if (!big_min_freq_adjust_data.is_init)
+ return ret;
+
+ return enable_big_min_freq_adjust();
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_adjust = {
+ .set = set_big_min_freq_adjust,
+ .get = param_get_bool,
+};
+module_param_cb(min_freq_adjust, &param_ops_big_min_freq_adjust,
+ &big_min_freq_adjust_enabled, 0644);
+
+module_param_named(min_freq_floor, big_min_freq_adjust_data.min_freq_floor,
+ uint, 0644);
+
+static int set_min_down_delay_ms(const char *buf, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_ulong(buf, kp);
+ if (ret) {
+ pr_err("Unable to set big_min_down_delay_ms: %d\n", ret);
+ return ret;
+ }
+
+ big_min_freq_adjust_data.min_down_delay_jiffies = msecs_to_jiffies(
+ big_min_down_delay_ms);
+
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_big_min_down_delay_ms = {
+ .set = set_min_down_delay_ms,
+ .get = param_get_ulong,
+};
+module_param_cb(min_down_delay_ms, &param_ops_big_min_down_delay_ms,
+ &big_min_down_delay_ms, 0644);
+
+static int __init big_min_freq_adjust_init(void)
+{
+ big_min_freq_adjust_data.is_init = true;
+ if (!big_min_freq_adjust_enabled)
+ return 0;
+
+ return enable_big_min_freq_adjust();
+}
+late_initcall(big_min_freq_adjust_init);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c254299..b8e9268 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2113,6 +2113,7 @@
set_bit(ICNSS_WLFW_EXISTS, &penv->state);
clear_bit(ICNSS_FW_DOWN, &penv->state);
+ icnss_ignore_qmi_timeout(false);
penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
if (!penv->wlfw_clnt) {
@@ -2450,8 +2451,10 @@
int ret = 0;
struct icnss_event_pd_service_down_data *event_data = data;
- if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+ if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+ icnss_ignore_qmi_timeout(false);
goto out;
+ }
if (priv->force_err_fatal)
ICNSS_ASSERT(0);
@@ -2475,8 +2478,6 @@
out:
kfree(data);
- icnss_ignore_qmi_timeout(false);
-
return ret;
}
@@ -2485,15 +2486,16 @@
{
int ret = 0;
- if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+ if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+ icnss_ignore_qmi_timeout(false);
goto out;
+ }
priv->early_crash_ind = true;
icnss_fw_crashed(priv, NULL);
out:
kfree(data);
- icnss_ignore_qmi_timeout(false);
return ret;
}
@@ -3178,7 +3180,8 @@
if (!dev)
return -ENODEV;
- if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ !test_bit(ICNSS_FW_READY, &penv->state)) {
icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
penv->state);
return -EINVAL;
@@ -3277,7 +3280,8 @@
if (!dev)
return -ENODEV;
- if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ !test_bit(ICNSS_FW_READY, &penv->state)) {
icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
penv->state);
return -EINVAL;
diff --git a/drivers/soc/qcom/idle.h b/drivers/soc/qcom/idle.h
new file mode 100644
index 0000000..2f852c3
--- /dev/null
+++ b/drivers/soc/qcom/idle.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2007-2009,2012-2014, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
+#define _ARCH_ARM_MACH_MSM_IDLE_H_
+
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_NUM_CLUSTER 4
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_CPU_V7) || defined(CONFIG_ARM64)
+extern unsigned long msm_pm_boot_vector[MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER];
+void msm_pm_boot_entry(void);
+#else
+static inline void msm_pm_boot_entry(void) {}
+#endif
+#endif
+#endif
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 696c043..6542861 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -406,6 +406,7 @@
memblock[i].peripheral ==
DHMS_MEM_PROC_MPSS_V01 &&
!memblock[i].guarantee &&
+ !memblock[i].client_request &&
memblock[i].allotted &&
!memblock[i].alloc_request) {
pr_debug("memshare: hypervisor unmapping for client id: %d\n",
@@ -665,9 +666,10 @@
__func__);
flag = 1;
} else if (!memblock[client_id].guarantee &&
- memblock[client_id].allotted) {
- pr_debug("memshare: %s: size: %d",
- __func__, memblock[client_id].size);
+ !memblock[client_id].client_request &&
+ memblock[client_id].allotted) {
+ pr_debug("memshare: %s:client_id:%d - size: %d",
+ __func__, client_id, memblock[client_id].size);
ret = hyp_assign_phys(memblock[client_id].phy_addr,
memblock[client_id].size, source_vmlist, 1,
dest_vmids, dest_perms, 1);
@@ -676,8 +678,8 @@
* This is an error case as hyp mapping was successful
* earlier but during unmap it lead to failure.
*/
- pr_err("memshare: %s, failed to unmap the region\n",
- __func__);
+ pr_err("memshare: %s, failed to unmap the region for client id:%d\n",
+ __func__, client_id);
}
size = memblock[client_id].size;
if (memblock[client_id].client_id == 1) {
@@ -696,8 +698,8 @@
attrs);
free_client(client_id);
} else {
- pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n",
- __func__);
+ pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n",
+ __func__, client_id);
}
if (flag) {
@@ -992,6 +994,10 @@
pdev->dev.of_node,
"qcom,allocate-boot-time");
+ memblock[num_clients].client_request = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,allocate-on-request");
+
rc = of_property_read_string(pdev->dev.of_node, "label",
&name);
if (rc) {
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index 6b54652..908f091 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -41,6 +41,8 @@
uint32_t allotted;
/* Memory allocation request received or not */
uint32_t alloc_request;
+ /* Allocation on request from a client*/
+ uint32_t client_request;
/* Size required for client */
uint32_t size;
/*
diff --git a/drivers/soc/qcom/msm-pm.c b/drivers/soc/qcom/msm-pm.c
new file mode 100644
index 0000000..129ebce
--- /dev/null
+++ b/drivers/soc/qcom/msm-pm.c
@@ -0,0 +1,921 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/smp.h>
+#include <linux/tick.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/msm-bus.h>
+#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif
+#include <soc/qcom/jtag.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define SCM_CMD_TERMINATE_PC (0x2)
+#define SCM_CMD_CORE_HOTPLUGGED (0x10)
+#define SCM_FLUSH_FLAG_MASK (0x3)
+
+#define SCLK_HZ (32768)
+
+#define MAX_BUF_SIZE 1024
+
+static int msm_pm_debug_mask = 1;
+module_param_named(
+ debug_mask, msm_pm_debug_mask, int, 0664
+);
+
+enum {
+ MSM_PM_DEBUG_SUSPEND = BIT(0),
+ MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
+ MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
+ MSM_PM_DEBUG_CLOCK = BIT(3),
+ MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
+ MSM_PM_DEBUG_IDLE = BIT(5),
+ MSM_PM_DEBUG_IDLE_LIMITS = BIT(6),
+ MSM_PM_DEBUG_HOTPLUG = BIT(7),
+};
+
+enum msm_pc_count_offsets {
+ MSM_PC_ENTRY_COUNTER,
+ MSM_PC_EXIT_COUNTER,
+ MSM_PC_FALLTHRU_COUNTER,
+ MSM_PC_UNUSED,
+ MSM_PC_NUM_COUNTERS,
+};
+
+static bool msm_pm_ldo_retention_enabled = true;
+static bool msm_pm_tz_flushes_cache;
+static bool msm_pm_ret_no_pll_switch;
+static bool msm_no_ramp_down_pc;
+static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+static DEFINE_PER_CPU(struct clk *, cpu_clks);
+static struct clk *l2_clk;
+
+static long *msm_pc_debug_counters;
+
+static cpumask_t retention_cpus;
+static DEFINE_SPINLOCK(retention_lock);
+static DEFINE_MUTEX(msm_pc_debug_mutex);
+
+static bool msm_pm_is_L1_writeback(void)
+{
+ u32 cache_id = 0;
+
+#if defined(CONFIG_CPU_V7)
+ u32 sel = 0;
+
+ asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
+ "isb\n\t"
+ "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
+ :[ccsidr]"=r" (cache_id)
+ :[ccselr]"r" (sel)
+ );
+ return cache_id & BIT(30);
+#elif defined(CONFIG_ARM64)
+ u32 sel = 0;
+
+ asm volatile("msr csselr_el1, %[ccselr]\n\t"
+ "isb\n\t"
+ "mrs %[ccsidr],ccsidr_el1\n\t"
+ :[ccsidr]"=r" (cache_id)
+ :[ccselr]"r" (sel)
+ );
+ return cache_id & BIT(30);
+#else
+#error No valid CPU arch selected
+#endif
+}
+
+static bool msm_pm_swfi(bool from_idle)
+{
+ msm_arch_idle();
+ return true;
+}
+
+static bool msm_pm_retention(bool from_idle)
+{
+ int ret = 0;
+ unsigned int cpu = smp_processor_id();
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+
+ spin_lock(&retention_lock);
+
+ if (!msm_pm_ldo_retention_enabled)
+ goto bailout;
+
+ cpumask_set_cpu(cpu, &retention_cpus);
+ spin_unlock(&retention_lock);
+
+ if (!msm_pm_ret_no_pll_switch)
+ clk_disable(cpu_clk);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_RETENTION, false);
+ WARN_ON(ret);
+
+ msm_arch_idle();
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+
+ if (!msm_pm_ret_no_pll_switch)
+ if (clk_enable(cpu_clk))
+ pr_err("%s(): Error restore cpu clk\n", __func__);
+
+ spin_lock(&retention_lock);
+ cpumask_clear_cpu(cpu, &retention_cpus);
+bailout:
+ spin_unlock(&retention_lock);
+ return true;
+}
+
+static inline void msm_pc_inc_debug_count(uint32_t cpu,
+ enum msm_pc_count_offsets offset)
+{
+ int cntr_offset;
+ uint32_t cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+
+ if (cluster_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+ WARN_ON(cpu);
+
+ cntr_offset = (cluster_id * MAX_CPUS_PER_CLUSTER * MSM_PC_NUM_COUNTERS)
+ + (cpu_id * MSM_PC_NUM_COUNTERS) + offset;
+
+ if (!msm_pc_debug_counters)
+ return;
+
+ msm_pc_debug_counters[cntr_offset]++;
+}
+
+static bool msm_pm_pc_hotplug(void)
+{
+ uint32_t cpu = smp_processor_id();
+ enum msm_pm_l2_scm_flag flag;
+ struct scm_desc desc;
+
+ flag = lpm_cpu_pre_pc_cb(cpu);
+
+ if (!msm_pm_tz_flushes_cache) {
+ if (flag == MSM_SCM_L2_OFF)
+ flush_cache_all();
+ else if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+ }
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ if (is_scm_armv8()) {
+ desc.args[0] = SCM_CMD_CORE_HOTPLUGGED |
+ (flag & SCM_FLUSH_FLAG_MASK);
+ desc.arginfo = SCM_ARGS(1);
+ scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+ SCM_CMD_TERMINATE_PC), &desc);
+ } else {
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+ SCM_CMD_CORE_HOTPLUGGED | (flag & SCM_FLUSH_FLAG_MASK));
+ }
+
+ /* Should not return here */
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+ return 0;
+}
+
+static bool msm_pm_fastpc(bool from_idle)
+{
+ int ret = 0;
+ unsigned int cpu = smp_processor_id();
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_FASTPC, false);
+ WARN_ON(ret);
+
+ if (from_idle || cpu_online(cpu))
+ msm_arch_idle();
+ else
+ msm_pm_pc_hotplug();
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+
+ return true;
+}
+
+int msm_pm_collapse(unsigned long unused)
+{
+ uint32_t cpu = smp_processor_id();
+ enum msm_pm_l2_scm_flag flag;
+ struct scm_desc desc;
+
+ flag = lpm_cpu_pre_pc_cb(cpu);
+
+ if (!msm_pm_tz_flushes_cache) {
+ if (flag == MSM_SCM_L2_OFF)
+ flush_cache_all();
+ else if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+ }
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ if (is_scm_armv8()) {
+ desc.args[0] = flag;
+ desc.arginfo = SCM_ARGS(1);
+ scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+ SCM_CMD_TERMINATE_PC), &desc);
+ } else {
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
+ }
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_pm_collapse);
+
+static bool __ref msm_pm_spm_power_collapse(
+ unsigned int cpu, int mode, bool from_idle, bool notify_rpm)
+{
+ void *entry;
+ bool collapsed = 0;
+ int ret;
+ bool save_cpu_regs = (cpu_online(cpu) || from_idle);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: notify_rpm %d\n",
+ cpu, __func__, (int) notify_rpm);
+
+ ret = msm_spm_set_low_power_mode(mode, notify_rpm);
+ WARN_ON(ret);
+
+ entry = save_cpu_regs ? cpu_resume : msm_secondary_startup;
+
+ msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
+
+ if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: program vector to %pk\n",
+ cpu, __func__, entry);
+
+ msm_jtag_save_state();
+
+ collapsed = save_cpu_regs ?
+ !cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+
+ msm_jtag_restore_state();
+
+ if (collapsed)
+ local_fiq_enable();
+
+ msm_pm_boot_config_after_pc(cpu);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
+ cpu, __func__, collapsed);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+ return collapsed;
+}
+
+static bool msm_pm_power_collapse_standalone(
+ bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ bool collapsed;
+
+ collapsed = msm_pm_spm_power_collapse(cpu,
+ MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+ from_idle, false);
+
+ return collapsed;
+}
+
+static int ramp_down_last_cpu(int cpu)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int ret = 0;
+
+ clk_disable(cpu_clk);
+ clk_disable(l2_clk);
+
+ return ret;
+}
+
+static int ramp_up_first_cpu(int cpu, int saved_rate)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int rc = 0;
+
+ if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: restore clock rate\n",
+ cpu, __func__);
+
+ clk_enable(l2_clk);
+
+ if (cpu_clk) {
+ int ret = clk_enable(cpu_clk);
+
+ if (ret) {
+ pr_err("%s(): Error restoring cpu clk\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ return rc;
+}
+
+static bool msm_pm_power_collapse(bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long saved_acpuclk_rate = 0;
+ bool collapsed;
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: idle %d\n",
+ cpu, __func__, (int)from_idle);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
+
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ saved_acpuclk_rate = ramp_down_last_cpu(cpu);
+
+ collapsed = msm_pm_spm_power_collapse(cpu, MSM_SPM_MODE_POWER_COLLAPSE,
+ from_idle, true);
+
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ ramp_up_first_cpu(cpu, saved_acpuclk_rate);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: post power up\n", cpu, __func__);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: return\n", cpu, __func__);
+ return collapsed;
+}
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+static void arch_idle(void) {}
+
+static bool (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+ msm_pm_power_collapse_standalone,
+ [MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
+ [MSM_PM_SLEEP_MODE_FASTPC] = msm_pm_fastpc,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
+};
+
+/**
+ * msm_cpu_pm_enter_sleep(): Enter a low power mode on current cpu
+ *
+ * @mode - sleep mode to enter
+ * @from_idle - bool to indicate that the mode is exercised during idle/suspend
+ *
+ * returns none
+ *
+ * The code should be with interrupts disabled and on the core on which the
+ * low power is to be executed.
+ *
+ */
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
+{
+ bool exit_stat = false;
+ unsigned int cpu = smp_processor_id();
+
+ if ((!from_idle && cpu_online(cpu))
+ || (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask))
+ pr_info("CPU%u:%s mode:%d during %s\n", cpu, __func__,
+ mode, from_idle ? "idle" : "suspend");
+
+ if (execute[mode])
+ exit_stat = execute[mode](from_idle);
+
+ return exit_stat;
+}
+
+/**
+ * msm_pm_wait_cpu_shutdown() - Wait for a core to be power collapsed during
+ * hotplug
+ *
+ * @ cpu - cpu to wait on.
+ *
+ * Blocking function call that waits on the core to be power collapsed. This
+ * function is called from platform_cpu_die to ensure that a core is power
+ * collapsed before sending the CPU_DEAD notification so the drivers could
+ * remove the resource votes for this CPU (regulator and clock)
+ */
+int msm_pm_wait_cpu_shutdown(unsigned int cpu)
+{
+ int timeout = 0;
+
+ if (!msm_pm_slp_sts)
+ return 0;
+ if (!msm_pm_slp_sts[cpu].base_addr)
+ return 0;
+ while (1) {
+ /*
+ * Check for the SPM of the core being hotplugged to set
+ * its sleep state. The SPM sleep state indicates that the
+ * core has been power collapsed.
+ */
+ int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
+
+ if (acc_sts & msm_pm_slp_sts[cpu].mask)
+ return 0;
+
+ udelay(100);
+ /*
+ * Dump spm registers for debugging
+ */
+ if (++timeout == 20) {
+ msm_spm_dump_regs(cpu);
+ __WARN_printf(
+ "CPU%u didn't collapse in 2ms, sleep status: 0x%x\n",
+ cpu, acc_sts);
+ }
+ }
+
+ return -EBUSY;
+}
+
+static void msm_pm_ack_retention_disable(void *data)
+{
+ /*
+ * This is a NULL function to ensure that the core has woken up
+ * and is safe to disable retention.
+ */
+}
+/**
+ * msm_pm_enable_retention() - Disable/Enable retention on all cores
+ * @enable: Enable/Disable retention
+ *
+ */
+void msm_pm_enable_retention(bool enable)
+{
+ if (enable == msm_pm_ldo_retention_enabled)
+ return;
+
+ msm_pm_ldo_retention_enabled = enable;
+
+ /*
+ * If retention is being disabled, wakeup all online core to ensure
+ * that it isn't executing retention. Offlined cores need not be woken
+ * up as they enter the deepest sleep mode, namely RPM assisted power
+ * collapse
+ */
+ if (!enable) {
+ preempt_disable();
+ smp_call_function_many(&retention_cpus,
+ msm_pm_ack_retention_disable,
+ NULL, true);
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(msm_pm_enable_retention);
+
+/**
+ * msm_pm_retention_enabled() - Check if retention is enabled
+ *
+ * returns true if retention is enabled
+ */
+bool msm_pm_retention_enabled(void)
+{
+ return msm_pm_ldo_retention_enabled;
+}
+EXPORT_SYMBOL(msm_pm_retention_enabled);
+
+static int msm_pm_snoc_client_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
+ static uint32_t msm_pm_bus_client;
+
+ msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+ if (msm_pm_bus_pdata) {
+ msm_pm_bus_client =
+ msm_bus_scale_register_client(msm_pm_bus_pdata);
+
+ if (!msm_pm_bus_client) {
+ pr_err("%s: Failed to register SNOC client", __func__);
+ rc = -ENXIO;
+ goto snoc_cl_probe_done;
+ }
+
+ rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
+
+ if (rc)
+ pr_err("%s: Error setting bus rate", __func__);
+ }
+
+snoc_cl_probe_done:
+ return rc;
+}
+
+static int msm_cpu_status_probe(struct platform_device *pdev)
+{
+ u32 cpu;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -EFAULT;
+
+ msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
+ sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
+ GFP_KERNEL);
+
+ if (!msm_pm_slp_sts)
+ return -ENOMEM;
+
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpun, *node;
+ char *key;
+
+ cpun = of_get_cpu_node(cpu, NULL);
+
+ if (!cpun) {
+ __WARN();
+ continue;
+ }
+
+ node = of_parse_phandle(cpun, "qcom,sleep-status", 0);
+ if (!node)
+ return -ENODEV;
+
+ msm_pm_slp_sts[cpu].base_addr = of_iomap(node, 0);
+ if (!msm_pm_slp_sts[cpu].base_addr) {
+ pr_err("%s: Can't find base addr\n", __func__);
+ return -ENODEV;
+ }
+
+ key = "qcom,sleep-status-mask";
+ rc = of_property_read_u32(node, key, &msm_pm_slp_sts[cpu].mask);
+ if (rc) {
+ pr_err("%s: Can't find %s property\n", __func__, key);
+ iounmap(msm_pm_slp_sts[cpu].base_addr);
+ return rc;
+ }
+ }
+
+ return 0;
+};
+
+static const struct of_device_id msm_slp_sts_match_tbl[] = {
+ {.compatible = "qcom,cpu-sleep-status"},
+ {},
+};
+
+static struct platform_driver msm_cpu_status_driver = {
+ .probe = msm_cpu_status_probe,
+ .driver = {
+ .name = "cpu_slp_status",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_slp_sts_match_tbl,
+ },
+};
+
+static const struct of_device_id msm_snoc_clnt_match_tbl[] = {
+ {.compatible = "qcom,pm-snoc-client"},
+ {},
+};
+
+static struct platform_driver msm_cpu_pm_snoc_client_driver = {
+ .probe = msm_pm_snoc_client_probe,
+ .driver = {
+ .name = "pm_snoc_client",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snoc_clnt_match_tbl,
+ },
+};
+
+struct msm_pc_debug_counters_buffer {
+ long *reg;
+ u32 len;
+ char buf[MAX_BUF_SIZE];
+};
+
+static char *counter_name[MSM_PC_NUM_COUNTERS] = {
+ "PC Entry Counter",
+ "Warmboot Entry Counter",
+ "PC Bailout Counter"
+};
+
+static int msm_pc_debug_counters_copy(
+ struct msm_pc_debug_counters_buffer *data)
+{
+ int j;
+ u32 stat;
+ unsigned int cpu;
+ unsigned int len;
+ uint32_t cluster_id;
+ uint32_t cpu_id;
+ uint32_t offset;
+
+ for_each_possible_cpu(cpu) {
+ len = scnprintf(data->buf + data->len,
+ sizeof(data->buf)-data->len,
+ "CPU%d\n", cpu);
+ cluster_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+ offset = (cluster_id * MAX_CPUS_PER_CLUSTER
+ * MSM_PC_NUM_COUNTERS)
+ + (cpu_id * MSM_PC_NUM_COUNTERS);
+
+ data->len += len;
+
+ for (j = 0; j < MSM_PC_NUM_COUNTERS - 1; j++) {
+ stat = data->reg[offset + j];
+ len = scnprintf(data->buf + data->len,
+ sizeof(data->buf) - data->len,
+ "\t%s: %d", counter_name[j], stat);
+
+ data->len += len;
+ }
+ len = scnprintf(data->buf + data->len,
+ sizeof(data->buf) - data->len,
+ "\n");
+
+ data->len += len;
+ }
+
+ return data->len;
+}
+
+static ssize_t msm_pc_debug_counters_file_read(struct file *file,
+ char __user *bufu, size_t count, loff_t *ppos)
+{
+ struct msm_pc_debug_counters_buffer *data;
+ ssize_t ret;
+
+ mutex_lock(&msm_pc_debug_mutex);
+ data = file->private_data;
+
+ if (!data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!bufu) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!access_ok(VERIFY_WRITE, bufu, count)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (*ppos >= data->len && data->len == 0)
+ data->len = msm_pc_debug_counters_copy(data);
+
+ ret = simple_read_from_buffer(bufu, count, ppos,
+ data->buf, data->len);
+exit:
+ mutex_unlock(&msm_pc_debug_mutex);
+ return ret;
+}
+
+static int msm_pc_debug_counters_file_open(struct inode *inode,
+ struct file *file)
+{
+ struct msm_pc_debug_counters_buffer *buf;
+ int ret = 0;
+
+ mutex_lock(&msm_pc_debug_mutex);
+
+ if (!inode->i_private) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ file->private_data = kzalloc(
+ sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
+
+ if (!file->private_data) {
+ pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+ __func__, sizeof(struct msm_pc_debug_counters_buffer));
+
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ buf = file->private_data;
+ buf->reg = (long *)inode->i_private;
+
+exit:
+ mutex_unlock(&msm_pc_debug_mutex);
+ return ret;
+}
+
+static int msm_pc_debug_counters_file_close(struct inode *inode,
+ struct file *file)
+{
+ mutex_lock(&msm_pc_debug_mutex);
+ kfree(file->private_data);
+ mutex_unlock(&msm_pc_debug_mutex);
+ return 0;
+}
+
+static const struct file_operations msm_pc_debug_counters_fops = {
+ .open = msm_pc_debug_counters_file_open,
+ .read = msm_pc_debug_counters_file_read,
+ .release = msm_pc_debug_counters_file_close,
+ .llseek = no_llseek,
+};
+
+static int msm_pm_clk_init(struct platform_device *pdev)
+{
+ bool synced_clocks;
+ u32 cpu;
+ char clk_name[] = "cpu??_clk";
+ char *key;
+
+ key = "qcom,saw-turns-off-pll";
+ if (of_property_read_bool(pdev->dev.of_node, key))
+ return 0;
+
+ key = "qcom,synced-clocks";
+ synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+ for_each_possible_cpu(cpu) {
+ struct clk *clk;
+
+ snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+ clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(clk)) {
+ if (cpu && synced_clocks)
+ return 0;
+ clk = NULL;
+ }
+ per_cpu(cpu_clks, cpu) = clk;
+ }
+
+ if (synced_clocks)
+ return 0;
+
+ l2_clk = clk_get(&pdev->dev, "l2_clk");
+ if (IS_ERR(l2_clk))
+ pr_warn("%s: Could not get l2_clk (-%ld)\n", __func__,
+ PTR_ERR(l2_clk));
+
+ return 0;
+}
+
+static int msm_cpu_pm_probe(struct platform_device *pdev)
+{
+ struct dentry *dent = NULL;
+ struct resource *res = NULL;
+ int ret = 0;
+ void __iomem *msm_pc_debug_counters_imem;
+ char *key;
+ int alloc_size = (MAX_NUM_CLUSTER * MAX_CPUS_PER_CLUSTER
+ * MSM_PC_NUM_COUNTERS
+ * sizeof(*msm_pc_debug_counters));
+
+ msm_pc_debug_counters = dma_alloc_coherent(&pdev->dev, alloc_size,
+ &msm_pc_debug_counters_phys, GFP_KERNEL);
+
+ if (msm_pc_debug_counters) {
+ memset(msm_pc_debug_counters, 0, alloc_size);
+ dent = debugfs_create_file("pc_debug_counter", 0444, NULL,
+ msm_pc_debug_counters,
+ &msm_pc_debug_counters_fops);
+ if (!dent)
+ pr_err("%s: ERROR debugfs_create_file failed\n",
+ __func__);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto skip_save_imem;
+ msm_pc_debug_counters_imem = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (msm_pc_debug_counters_imem) {
+ writel_relaxed(msm_pc_debug_counters_phys,
+ msm_pc_debug_counters_imem);
+ /* memory barrier */
+ mb();
+ devm_iounmap(&pdev->dev,
+ msm_pc_debug_counters_imem);
+ }
+ } else {
+ msm_pc_debug_counters = NULL;
+ msm_pc_debug_counters_phys = 0;
+ }
+skip_save_imem:
+ if (pdev->dev.of_node) {
+ key = "qcom,tz-flushes-cache";
+ msm_pm_tz_flushes_cache =
+ of_property_read_bool(pdev->dev.of_node, key);
+
+ key = "qcom,no-pll-switch-for-retention";
+ msm_pm_ret_no_pll_switch =
+ of_property_read_bool(pdev->dev.of_node, key);
+
+ ret = msm_pm_clk_init(pdev);
+ if (ret) {
+ pr_info("msm_pm_clk_init returned error\n");
+ return ret;
+ }
+ }
+
+ if (pdev->dev.of_node)
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ return ret;
+}
+
+static const struct of_device_id msm_cpu_pm_table[] = {
+ {.compatible = "qcom,pm"},
+ {},
+};
+
+static struct platform_driver msm_cpu_pm_driver = {
+ .probe = msm_cpu_pm_probe,
+ .driver = {
+ .name = "msm-pm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cpu_pm_table,
+ },
+};
+
+static int __init msm_pm_drv_init(void)
+{
+ int rc;
+
+ cpumask_clear(&retention_cpus);
+
+ rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
+
+ if (rc)
+ pr_err("%s(): failed to register driver %s\n", __func__,
+ msm_cpu_pm_snoc_client_driver.driver.name);
+ return rc;
+}
+late_initcall(msm_pm_drv_init);
+
+static int __init msm_pm_debug_counters_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_cpu_pm_driver);
+
+ if (rc)
+ pr_err("%s(): failed to register driver %s\n", __func__,
+ msm_cpu_pm_driver.driver.name);
+ return rc;
+}
+fs_initcall(msm_pm_debug_counters_init);
+
+int __init msm_pm_sleep_status_init(void)
+{
+ static bool registered;
+
+ if (registered)
+ return 0;
+ registered = true;
+
+ return platform_driver_register(&msm_cpu_status_driver);
+}
+arch_initcall(msm_pm_sleep_status_init);
+
+#ifdef CONFIG_ARM
+static int idle_initialize(void)
+{
+ arm_pm_idle = arch_idle;
+ return 0;
+}
+early_initcall(idle_initialize);
+#endif
diff --git a/drivers/soc/qcom/pil_bg_intf.h b/drivers/soc/qcom/pil_bg_intf.h
index 722024b..46aed25 100644
--- a/drivers/soc/qcom/pil_bg_intf.h
+++ b/drivers/soc/qcom/pil_bg_intf.h
@@ -36,7 +36,7 @@
__packed struct tzapp_bg_rsp {
uint32_t tzapp_bg_cmd;
uint32_t bg_info_len;
- uint32_t status;
+ int32_t status;
uint32_t bg_info[100];
};
diff --git a/drivers/soc/qcom/pm-boot.c b/drivers/soc/qcom/pm-boot.c
new file mode 100644
index 0000000..f0daeba
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.c
@@ -0,0 +1,98 @@
+/* Copyright (c) 2011-2014, 2016, 2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+#include "pm-boot.h"
+#include "idle.h"
+
+#define CPU_INDEX(cluster, cpu) (cluster * MAX_CPUS_PER_CLUSTER + cpu)
+
+static void (*msm_pm_boot_before_pc)(unsigned int cpu, unsigned long entry);
+static void (*msm_pm_boot_after_pc)(unsigned int cpu);
+
+static int msm_pm_tz_boot_init(void)
+{
+ int ret;
+ phys_addr_t warmboot_addr = virt_to_phys(msm_pm_boot_entry);
+
+ if (scm_is_mc_boot_available())
+ ret = scm_set_warm_boot_addr_mc_for_all(warmboot_addr);
+ else {
+ unsigned int flag = 0;
+
+ if (num_possible_cpus() == 1)
+ flag = SCM_FLAG_WARMBOOT_CPU0;
+ else if (num_possible_cpus() == 2)
+ flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1;
+ else if (num_possible_cpus() == 4)
+ flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1 |
+ SCM_FLAG_WARMBOOT_CPU2 | SCM_FLAG_WARMBOOT_CPU3;
+ else
+ pr_warn("%s: set warmboot address failed\n",
+ __func__);
+
+ ret = scm_set_boot_addr(virt_to_phys(msm_pm_boot_entry), flag);
+ }
+ return ret;
+}
+static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
+{
+ uint32_t clust_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ uint32_t cpu_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
+ unsigned long *start_address;
+ unsigned long *end_address;
+
+ if (clust_id >= MAX_NUM_CLUSTER || cpu_id >= MAX_CPUS_PER_CLUSTER)
+ WARN_ON(cpu);
+
+ msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)] = address;
+ start_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id)];
+ end_address = &msm_pm_boot_vector[CPU_INDEX(clust_id, cpu_id + 1)];
+ dmac_clean_range((void *)start_address, (void *)end_address);
+}
+
+static void msm_pm_config_tz_before_pc(unsigned int cpu,
+ unsigned long entry)
+{
+ msm_pm_write_boot_vector(cpu, entry);
+}
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry)
+{
+ if (msm_pm_boot_before_pc)
+ msm_pm_boot_before_pc(cpu, entry);
+}
+
+void msm_pm_boot_config_after_pc(unsigned int cpu)
+{
+ if (msm_pm_boot_after_pc)
+ msm_pm_boot_after_pc(cpu);
+}
+
+static int __init msm_pm_boot_init(void)
+{
+ int ret = 0;
+
+ ret = msm_pm_tz_boot_init();
+ msm_pm_boot_before_pc = msm_pm_config_tz_before_pc;
+ msm_pm_boot_after_pc = NULL;
+
+ return ret;
+}
+postcore_initcall(msm_pm_boot_init);
diff --git a/drivers/soc/qcom/pm-boot.h b/drivers/soc/qcom/pm-boot.h
new file mode 100644
index 0000000..7ec053a
--- /dev/null
+++ b/drivers/soc/qcom/pm-boot.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_PM_BOOT_H
+#define _ARCH_ARM_MACH_MSM_PM_BOOT_H
+
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry);
+void msm_pm_boot_config_after_pc(unsigned int cpu);
+
+#endif
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index 8668155..fb70a07 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -245,7 +245,7 @@
switch (event) {
case USB_QDSS_CONNECT:
- usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+ usb_qdss_alloc_req(ch, poolsize, 0);
mhi_queue_read(drvdata);
break;
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
index 5f1ac4e..268a8fe 100644
--- a/drivers/soc/qcom/spm_devices.c
+++ b/drivers/soc/qcom/spm_devices.c
@@ -234,7 +234,7 @@
}
static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
- unsigned int mode, bool notify_rpm)
+ unsigned int mode, bool notify_rpm, bool set_spm_enable)
{
uint32_t i;
int ret = -EINVAL;
@@ -251,9 +251,11 @@
if (!dev->num_modes)
return 0;
- if (mode == MSM_SPM_MODE_DISABLED) {
+ if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) {
ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
- } else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
+ } else {
+ if (set_spm_enable)
+ ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true);
for (i = 0; i < dev->num_modes; i++) {
if (dev->modes[i].mode != mode)
continue;
@@ -539,10 +541,24 @@
{
struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
- return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+ return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
}
EXPORT_SYMBOL(msm_spm_set_low_power_mode);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs)
+{
+ struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+
+ dev->allow_rpm_hs = allow_rpm_hs;
+}
+EXPORT_SYMBOL(msm_spm_set_rpm_hs);
+
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+ unsigned int mode, bool notify_rpm)
+{
+ return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false);
+}
+
/**
+ * msm_spm_init(): Board initialization function
* @data: platform specific SPM register configuration data
@@ -586,7 +602,7 @@
int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
unsigned int mode, bool notify_rpm)
{
- return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
+ return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
}
#ifdef CONFIG_MSM_L2_SPM
diff --git a/drivers/soc/qcom/subsys-pil-bg.c b/drivers/soc/qcom/subsys-pil-bg.c
index 75c3666..070733e 100644
--- a/drivers/soc/qcom/subsys-pil-bg.c
+++ b/drivers/soc/qcom/subsys-pil-bg.c
@@ -30,6 +30,7 @@
#include "peripheral-loader.h"
#include "../../misc/qseecom_kernel.h"
#include "pil_bg_intf.h"
+#include "bgcom_interface.h"
#define INVALID_GPIO -1
#define NUM_GPIOS 4
@@ -37,7 +38,7 @@
#define desc_to_data(d) container_of(d, struct pil_bg_data, desc)
#define subsys_to_data(d) container_of(d, struct pil_bg_data, subsys_desc)
#define BG_RAMDUMP_SZ 0x00102000
-#define BG_CRASH_IN_TWM 2
+#define BG_CRASH_IN_TWM -2
/**
* struct pil_bg_data
* @qseecom_handle: handle of TZ app
@@ -90,9 +91,18 @@
static void bg_app_shutdown_notify(const struct subsys_desc *subsys)
{
struct pil_bg_data *bg_data = subsys_to_data(subsys);
+
+ /* Disable irq if already BG is up */
+ if (bg_data->is_ready) {
+ disable_irq(bg_data->status_irq);
+ disable_irq(bg_data->errfatal_irq);
+ bg_data->is_ready = false;
+ }
/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
- if (gpio_is_valid(bg_data->gpios[2]))
+ if (gpio_is_valid(bg_data->gpios[2])) {
+ pr_debug("Sending Apps shutdown signal\n");
gpio_set_value(bg_data->gpios[2], 1);
+ }
}
/**
@@ -106,9 +116,18 @@
{
struct pil_bg_data *bg_data = container_of(nb,
struct pil_bg_data, reboot_blk);
+
+ /* Disable irq if already BG is up */
+ if (bg_data->is_ready) {
+ disable_irq(bg_data->status_irq);
+ disable_irq(bg_data->errfatal_irq);
+ bg_data->is_ready = false;
+ }
/* Toggle AP2BG err fatal gpio here to inform apps err fatal event */
- if (gpio_is_valid(bg_data->gpios[2]))
+ if (gpio_is_valid(bg_data->gpios[2])) {
+ pr_debug("Sending reboot signal\n");
gpio_set_value(bg_data->gpios[2], 1);
+ }
return NOTIFY_DONE;
}
@@ -266,7 +285,6 @@
return ret;
}
enable_irq(bg_data->status_irq);
- enable_irq(bg_data->errfatal_irq);
ret = wait_for_err_ready(bg_data);
if (ret) {
dev_err(bg_data->desc.dev,
@@ -289,10 +307,12 @@
{
struct pil_bg_data *bg_data = subsys_to_data(subsys);
- disable_irq(bg_data->status_irq);
- devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
- disable_irq(bg_data->errfatal_irq);
- bg_data->is_ready = false;
+ if (bg_data->is_ready) {
+ disable_irq(bg_data->status_irq);
+ devm_free_irq(bg_data->desc.dev, bg_data->status_irq, bg_data);
+ disable_irq(bg_data->errfatal_irq);
+ bg_data->is_ready = false;
+ }
return 0;
}
@@ -393,7 +413,9 @@
ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
if (bg_data->cmd_status == BG_CRASH_IN_TWM) {
/* Do ramdump and resend boot cmd */
- bg_data->subsys_desc.ramdump(true, &bg_data->subsys_desc);
+ if (is_twm_exit())
+ bg_data->subsys_desc.ramdump(true,
+ &bg_data->subsys_desc);
bg_tz_req.tzapp_bg_cmd = BGPIL_DLOAD_CONT;
ret = bgpil_tzapp_comm(bg_data, &bg_tz_req);
}
@@ -524,7 +546,6 @@
} else if (value == false && drvdata->is_ready) {
dev_err(drvdata->desc.dev,
"BG got unexpected reset: irq state changed 1->0\n");
- drvdata->is_ready = false;
queue_work(drvdata->bg_queue, &drvdata->restart_work);
} else {
dev_err(drvdata->desc.dev,
@@ -586,7 +607,6 @@
goto err;
}
drvdata->errfatal_irq = irq;
- enable_irq(drvdata->errfatal_irq);
/* Configure outgoing GPIO's */
if (gpio_request(drvdata->gpios[2], "AP2BG_ERRFATAL")) {
dev_err(&pdev->dev,
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 9c6edbf..1befdcf 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -22,6 +22,13 @@
#define PDC_TIME_VALID_SHIFT 31
#define PDC_TIME_UPPER_MASK 0xFFFFFF
+#ifdef CONFIG_ARM_GIC_V3
+#include <linux/irqchip/arm-gic-v3.h>
+#else
+static inline void gic_v3_dist_restore(void) {}
+static inline void gic_v3_dist_save(void) {}
+#endif
+
static struct rpmh_client *rpmh_client;
static int setup_wakeup(uint32_t lo, uint32_t hi)
@@ -61,6 +68,7 @@
*/
static int system_sleep_enter(struct cpumask *mask)
{
+ gic_v3_dist_save();
return rpmh_flush(rpmh_client);
}
@@ -70,6 +78,7 @@
static void system_sleep_exit(void)
{
msm_rpmh_master_stats_update();
+ gic_v3_dist_restore();
}
static struct system_pm_ops pm_ops = {
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 6199523..0f0b7ba 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1449,6 +1449,7 @@
.driver = {
.name = "spmi_pmic_arb",
.of_match_table = spmi_pmic_arb_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 8e5d100..4e895ee 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -617,7 +617,6 @@
static void msm_geni_serial_complete_rx_eot(struct uart_port *uport)
{
int poll_done = 0, tries = 0;
- u32 geni_status = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
do {
@@ -626,11 +625,11 @@
tries++;
} while (!poll_done && tries < 5);
- geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
-
if (!poll_done)
- IPC_LOG_MSG(port->ipc_log_misc, "%s: RX_EOT, GENI:0x%x\n",
- __func__, geni_status);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__,
+ geni_read_reg_nolog(uport->membase, SE_GENI_STATUS),
+ geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0));
else
geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR);
}
@@ -1131,7 +1130,9 @@
* cancel control bit.
*/
mb();
- msm_geni_serial_complete_rx_eot(uport);
+ if (!uart_console(uport))
+ msm_geni_serial_complete_rx_eot(uport);
+
done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_CANCEL, false);
if (done) {
@@ -1856,6 +1857,7 @@
unsigned long ser_clk_cfg = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
unsigned long clk_rate;
+ unsigned long flags;
if (!uart_console(uport)) {
int ret = msm_geni_serial_power_on(uport);
@@ -1867,7 +1869,13 @@
return;
}
}
+ /* Take a spinlock else stop_rx causes a race with an ISR due to Cancel
+ * and FSM_RESET. This also has a potential race with the dma_map/unmap
+ * operations of ISR.
+ */
+ spin_lock_irqsave(&uport->lock, flags);
msm_geni_serial_stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->cur_baud = baud;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index efaac5e..60acbf7 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1858,10 +1858,35 @@
return 0;
}
+
+static int msm_serial_freeze(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
+
+ ret = msm_serial_suspend(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the rate as recommended to avoid issues where the clock
+ * driver skips reconfiguring the clock hardware during
+ * hibernation resume.
+ */
+ return clk_set_rate(msm_port->clk, 19200000);
+}
#endif
static const struct dev_pm_ops msm_serial_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = msm_serial_suspend,
+ .resume = msm_serial_resume,
+ .freeze = msm_serial_freeze,
+ .thaw = msm_serial_resume,
+ .poweroff = msm_serial_suspend,
+ .restore = msm_serial_resume,
+#endif
};
static struct platform_driver msm_platform_driver = {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ee33c0d..5532246 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1495,9 +1495,10 @@
* Some buses would like to keep their devices in suspend
* state after system resume. Their resume happen when
* a remote wakeup is detected or interface driver start
- * I/O.
+ * I/O. And in the case when the system is restoring from
+ * hibernation, make sure all the devices are resumed.
*/
- if (udev->bus->skip_resume)
+ if (udev->bus->skip_resume && msg.event != PM_EVENT_RESTORE)
return 0;
/* For all calls, take the device back to full power and
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ef3f542..248dd9a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1567,6 +1567,19 @@
return 0;
}
+static int dwc3_pm_restore(struct device *dev)
+{
+ /*
+ * Set the core as runtime active to prevent the runtime
+ * PM ops being called before the PM restore is completed.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
static int dwc3_resume(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
@@ -1591,7 +1604,12 @@
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dwc3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .suspend = dwc3_suspend,
+ .resume = dwc3_resume,
+ .freeze = dwc3_suspend,
+ .thaw = dwc3_pm_restore,
+ .poweroff = dwc3_suspend,
+ .restore = dwc3_pm_restore,
SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
dwc3_runtime_idle)
};
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 810546a..bb1aec7 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2093,6 +2093,19 @@
dwc3_core_init(dwc);
/* Re-configure event buffers */
dwc3_event_buffers_setup(dwc);
+
+ /* Get initial P3 status and enable IN_P3 event */
+ val = dwc3_msm_read_reg_field(mdwc->base,
+ DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+ atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
+ dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+ PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+
+ if (mdwc->otg_state == OTG_STATE_A_HOST) {
+ dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
+ __func__);
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ }
}
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
@@ -2246,7 +2259,7 @@
}
}
-static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
{
int ret;
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2363,8 +2376,8 @@
clk_disable_unprepare(mdwc->xo_clk);
/* Perform controller power collapse */
- if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
- mdwc->in_restart)) {
+ if ((!mdwc->in_host_mode && (!mdwc->in_device_mode ||
+ mdwc->in_restart)) || hibernation) {
mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
dwc3_msm_config_gdsc(mdwc, 0);
@@ -2520,8 +2533,6 @@
/* Recover from controller power collapse */
if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
- u32 tmp;
-
if (mdwc->iommu_map) {
ret = arm_iommu_attach_device(mdwc->dev,
mdwc->iommu_map);
@@ -2536,13 +2547,6 @@
dwc3_msm_power_collapse_por(mdwc);
- /* Get initial P3 status and enable IN_P3 event */
- tmp = dwc3_msm_read_reg_field(mdwc->base,
- DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
- atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
- dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
- PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
-
mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
}
@@ -2821,8 +2825,11 @@
int ret;
mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
- if (IS_ERR(mdwc->dwc3_gdsc))
+ if (IS_ERR(mdwc->dwc3_gdsc)) {
+ if (PTR_ERR(mdwc->dwc3_gdsc) == -EPROBE_DEFER)
+ return PTR_ERR(mdwc->dwc3_gdsc);
mdwc->dwc3_gdsc = NULL;
+ }
mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
if (IS_ERR(mdwc->xo_clk)) {
@@ -4401,7 +4408,39 @@
return -EBUSY;
}
- ret = dwc3_msm_suspend(mdwc);
+ ret = dwc3_msm_suspend(mdwc, false);
+ if (!ret)
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return ret;
+}
+
+static int dwc3_msm_pm_freeze(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(dev, "dwc3-msm PM freeze\n");
+ dbg_event(0xFF, "PM Freeze", 0);
+
+ flush_workqueue(mdwc->dwc3_wq);
+
+ /* Resume the core to make sure we can power collapse it */
+ ret = dwc3_msm_resume(mdwc);
+
+ /*
+ * PHYs also needed to be power collapsed, so call the notify_disconnect
+ * before suspend to ensure it.
+ */
+ usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+ mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+ if (mdwc->ss_phy) {
+ usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+ mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+ }
+
+ ret = dwc3_msm_suspend(mdwc, true);
if (!ret)
atomic_set(&mdwc->pm_suspended, 1);
@@ -4425,6 +4464,35 @@
return 0;
}
+
+static int dwc3_msm_pm_restore(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ dev_dbg(dev, "dwc3-msm PM restore\n");
+ dbg_event(0xFF, "PM Restore", 0);
+
+ atomic_set(&mdwc->pm_suspended, 0);
+
+ dwc3_msm_resume(mdwc);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /* Restore PHY flags if hibernated in host mode */
+ if (mdwc->otg_state == OTG_STATE_A_HOST) {
+ usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+ mdwc->hs_phy->flags |= PHY_HOST_MODE;
+ if (mdwc->ss_phy) {
+ usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
+ mdwc->ss_phy->flags |= PHY_HOST_MODE;
+ }
+ }
+
+
+ return 0;
+}
#endif
#ifdef CONFIG_PM
@@ -4447,7 +4515,7 @@
dev_dbg(dev, "DWC3-msm runtime suspend\n");
dbg_event(0xFF, "RT Sus", 0);
- return dwc3_msm_suspend(mdwc);
+ return dwc3_msm_suspend(mdwc, false);
}
static int dwc3_msm_runtime_resume(struct device *dev)
@@ -4463,7 +4531,12 @@
#endif
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+ .suspend = dwc3_msm_pm_suspend,
+ .resume = dwc3_msm_pm_resume,
+ .freeze = dwc3_msm_pm_freeze,
+ .restore = dwc3_msm_pm_restore,
+ .thaw = dwc3_msm_pm_restore,
+ .poweroff = dwc3_msm_pm_suspend,
SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
dwc3_msm_runtime_idle)
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f0e4d5e..f878b8d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -414,7 +414,7 @@
dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
dev_err(dwc->dev, "%s command timeout for %s\n",
dwc3_gadget_ep_cmd_string(cmd), dep->name);
- if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_ENDTRANSFER) {
dwc->ep_cmd_timeout_cnt++;
dwc3_notify_event(dwc,
DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d3e0ca5..90cbb61 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -568,6 +568,7 @@
config USB_CONFIGFS_F_GSI
bool "USB GSI function"
select USB_F_GSI
+ select USB_U_ETHER
depends on USB_CONFIGFS
help
Generic function driver to support h/w acceleration to IPA over GSI.
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 939c219..703fb24 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3951,7 +3951,7 @@
usb_del_gadget_udc(&udc->gadget);
remove_trans:
if (udc->transceiver)
- otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+ otg_set_peripheral(udc->transceiver->otg, NULL);
err("error = %i", retval);
put_transceiver:
@@ -3989,7 +3989,7 @@
usb_del_gadget_udc(&udc->gadget);
if (udc->transceiver) {
- otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+ otg_set_peripheral(udc->transceiver->otg, NULL);
usb_put_phy(udc->transceiver);
}
destroy_eps(udc);
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 5ffbf12..d3799e5 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -14,12 +14,18 @@
#include "f_gsi.h"
#include "rndis.h"
+struct usb_gsi_debugfs {
+ struct dentry *debugfs_root;
+};
+
+static struct usb_gsi_debugfs debugfs;
+
static bool qti_packet_debug;
module_param(qti_packet_debug, bool, 0644);
MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
static struct workqueue_struct *ipa_usb_wq;
-static struct f_gsi *__gsi[IPA_USB_MAX_TETH_PROT_SIZE];
+static struct f_gsi *__gsi[USB_PROT_MAX];
static void *ipc_log_ctxt;
#define NUM_LOG_PAGES 15
@@ -56,6 +62,15 @@
static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags);
static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
+static inline bool is_ext_prot_ether(int prot_id)
+{
+ if (prot_id == USB_PROT_RMNET_ETHER ||
+ prot_id == USB_PROT_DPL_ETHER)
+ return true;
+
+ return false;
+}
+
static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
{
bool remote_wakeup_allowed;
@@ -196,6 +211,223 @@
return ret;
}
+static void debugfs_rw_timer_func(unsigned long arg)
+{
+ struct f_gsi *gsi;
+
+ gsi = (struct f_gsi *)arg;
+
+ if (!atomic_read(&gsi->connected)) {
+ log_event_dbg("%s: gsi not connected..del timer\n", __func__);
+ gsi->debugfs_rw_enable = 0;
+ del_timer(&gsi->debugfs_rw_timer);
+ return;
+ }
+
+ log_event_dbg("%s: calling gsi_wakeup_host\n", __func__);
+ gsi_wakeup_host(gsi);
+
+ if (gsi->debugfs_rw_enable) {
+ log_event_dbg("%s: re-arm the timer\n", __func__);
+ mod_timer(&gsi->debugfs_rw_timer,
+ jiffies + msecs_to_jiffies(gsi->debugfs_rw_interval));
+ }
+}
+
+static struct f_gsi *get_connected_gsi(void)
+{
+ struct f_gsi *connected_gsi;
+ bool gsi_connected = false;
+ unsigned int i;
+
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+ connected_gsi = __gsi[i];
+ if (connected_gsi && atomic_read(&connected_gsi->connected)) {
+ gsi_connected = true;
+ break;
+ }
+ }
+
+ if (!gsi_connected)
+ connected_gsi = NULL;
+
+ return connected_gsi;
+}
+
+#define DEFAULT_RW_TIMER_INTERVAL 500 /* in ms */
+static ssize_t usb_gsi_rw_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct f_gsi *gsi;
+ u8 input;
+ int ret;
+
+ gsi = get_connected_gsi();
+ if (!gsi) {
+ log_event_dbg("%s: gsi not connected\n", __func__);
+ goto err;
+ }
+
+ if (ubuf == NULL) {
+ log_event_dbg("%s: buffer is Null.\n", __func__);
+ goto err;
+ }
+
+ ret = kstrtou8_from_user(ubuf, count, 0, &input);
+ if (ret) {
+ log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+ goto err;
+ }
+
+ if (gsi->debugfs_rw_enable == !!input) {
+ if (!!input)
+ log_event_dbg("%s: RW already enabled\n", __func__);
+ else
+ log_event_dbg("%s: RW already disabled\n", __func__);
+ goto err;
+ }
+
+ gsi->debugfs_rw_enable = !!input;
+ if (gsi->debugfs_rw_enable) {
+ init_timer(&gsi->debugfs_rw_timer);
+ gsi->debugfs_rw_timer.data = (unsigned long) gsi;
+ gsi->debugfs_rw_timer.function = debugfs_rw_timer_func;
+
+ /* Use default remote wakeup timer interval if it is not set */
+ if (!gsi->debugfs_rw_interval)
+ gsi->debugfs_rw_interval = DEFAULT_RW_TIMER_INTERVAL;
+ gsi->debugfs_rw_timer.expires = jiffies +
+ msecs_to_jiffies(gsi->debugfs_rw_interval);
+ add_timer(&gsi->debugfs_rw_timer);
+ log_event_dbg("%s: timer initialized\n", __func__);
+ } else {
+ del_timer_sync(&gsi->debugfs_rw_timer);
+ log_event_dbg("%s: timer deleted\n", __func__);
+ }
+
+err:
+ return count;
+}
+
+static int usb_gsi_rw_show(struct seq_file *s, void *unused)
+{
+
+ struct f_gsi *gsi;
+
+ gsi = get_connected_gsi();
+ if (!gsi) {
+ log_event_dbg("%s: gsi not connected\n", __func__);
+ return 0;
+ }
+
+ seq_printf(s, "%d\n", gsi->debugfs_rw_enable);
+
+ return 0;
+}
+
+static int usb_gsi_rw_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, usb_gsi_rw_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw = {
+ .open = usb_gsi_rw_open,
+ .read = seq_read,
+ .write = usb_gsi_rw_write,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static ssize_t usb_gsi_rw_timer_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct f_gsi *gsi;
+ u16 timer_val;
+ int ret;
+
+ gsi = get_connected_gsi();
+ if (!gsi) {
+ log_event_dbg("%s: gsi not connected\n", __func__);
+ goto err;
+ }
+
+ if (ubuf == NULL) {
+ log_event_dbg("%s: buffer is NULL.\n", __func__);
+ goto err;
+ }
+
+ ret = kstrtou16_from_user(ubuf, count, 0, &timer_val);
+ if (ret) {
+ log_event_err("%s: Invalid value. err:%d\n", __func__, ret);
+ goto err;
+ }
+
+ if (timer_val <= 0 || timer_val > 10000) {
+ log_event_err("%s: value must be > 0 and < 10000.\n", __func__);
+ goto err;
+ }
+
+ gsi->debugfs_rw_interval = timer_val;
+err:
+ return count;
+}
+
+static int usb_gsi_rw_timer_show(struct seq_file *s, void *unused)
+{
+ struct f_gsi *gsi;
+ unsigned int timer_interval;
+
+ gsi = get_connected_gsi();
+ if (!gsi) {
+ log_event_dbg("%s: gsi not connected\n", __func__);
+ return 0;
+ }
+
+ timer_interval = DEFAULT_RW_TIMER_INTERVAL;
+ if (gsi->debugfs_rw_interval)
+ timer_interval = gsi->debugfs_rw_interval;
+
+ seq_printf(s, "%ums\n", timer_interval);
+
+ return 0;
+}
+
+static int usb_gsi_rw_timer_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, usb_gsi_rw_timer_show, inode->i_private);
+}
+
+static const struct file_operations fops_usb_gsi_rw_timer = {
+ .open = usb_gsi_rw_timer_open,
+ .read = seq_read,
+ .write = usb_gsi_rw_timer_write,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int usb_gsi_debugfs_init(void)
+{
+ debugfs.debugfs_root = debugfs_create_dir("usb_gsi", NULL);
+ if (!debugfs.debugfs_root)
+ return -ENOMEM;
+
+ debugfs_create_file("remote_wakeup_enable", 0600,
+ debugfs.debugfs_root,
+ __gsi, &fops_usb_gsi_rw);
+ debugfs_create_file("remote_wakeup_interval", 0600,
+ debugfs.debugfs_root,
+ __gsi,
+ &fops_usb_gsi_rw_timer);
+ return 0;
+}
+
+static void usb_gsi_debugfs_exit(void)
+{
+ debugfs_remove_recursive(debugfs.debugfs_root);
+}
+
/*
* Callback for when when network interface is up
* and userspace is ready to answer DHCP requests, or remote wakeup
@@ -226,7 +458,7 @@
log_event_err("%s: Set net_ready_trigger", __func__);
gsi->d_port.net_ready_trigger = true;
- if (gsi->prot_id == IPA_USB_ECM) {
+ if (gsi->prot_id == USB_PROT_ECM_IPA) {
cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
if (IS_ERR(cpkt_notify_connect)) {
spin_unlock_irqrestore(&gsi->d_port.lock,
@@ -260,7 +492,7 @@
* Do not post EVT_CONNECTED for RNDIS.
* Data path for RNDIS is enabled on EVT_HOST_READY.
*/
- if (gsi->prot_id != IPA_USB_RNDIS) {
+ if (gsi->prot_id != USB_PROT_RNDIS_IPA) {
post_event(&gsi->d_port, EVT_CONNECTED);
queue_work(gsi->d_port.ipa_usb_wq,
&gsi->d_port.usb_ipa_w);
@@ -311,7 +543,7 @@
log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
in_params->client =
- (gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+ (gsi->prot_id != USB_PROT_DIAG_IPA) ? IPA_CLIENT_USB_CONS :
IPA_CLIENT_USB_DPL_CONS;
in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
in_params->teth_prot = gsi->prot_id;
@@ -395,7 +627,7 @@
conn_params->usb_to_ipa_xferrscidx =
d_port->out_xfer_rsc_index;
conn_params->usb_to_ipa_xferrscidx_valid =
- (gsi->prot_id != IPA_USB_DIAG) ? true : false;
+ (gsi->prot_id != USB_PROT_DIAG_IPA) ? true : false;
conn_params->ipa_to_usb_xferrscidx_valid = true;
conn_params->teth_prot = gsi->prot_id;
conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
@@ -440,7 +672,7 @@
d_port->in_request.db_reg_phs_addr_msb =
ipa_in_channel_out_params.db_reg_phs_addr_msb;
- if (gsi->prot_id != IPA_USB_DIAG) {
+ if (gsi->prot_id != USB_PROT_DIAG_IPA) {
d_port->out_channel_handle =
ipa_out_channel_out_params.clnt_hdl;
d_port->out_request.db_reg_phs_addr_lsb =
@@ -1159,7 +1391,8 @@
switch (cmd) {
case QTI_CTRL_MODEM_OFFLINE:
- if (gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) {
log_event_dbg("%s:Modem Offline not handled", __func__);
goto exit_ioctl;
}
@@ -1177,7 +1410,8 @@
gsi_ctrl_send_notification(gsi);
break;
case QTI_CTRL_MODEM_ONLINE:
- if (gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) {
log_event_dbg("%s:Modem Online not handled", __func__);
goto exit_ioctl;
}
@@ -1186,7 +1420,8 @@
break;
case QTI_CTRL_GET_LINE_STATE:
val = atomic_read(&gsi->connected);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
val = gsi->rmnet_dtr_status;
ret = copy_to_user((void __user *)arg, &val, sizeof(val));
@@ -1207,32 +1442,43 @@
break;
}
- if (gsi->prot_id == IPA_USB_DIAG &&
+ if ((gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) &&
(gsi->d_port.in_channel_handle == -EINVAL)) {
ret = -EAGAIN;
break;
}
- if (gsi->d_port.in_channel_handle == -EINVAL &&
- gsi->d_port.out_channel_handle == -EINVAL) {
- ret = -EAGAIN;
- break;
+ if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+ if (gsi->d_port.in_channel_handle == -EINVAL &&
+ gsi->d_port.out_channel_handle == -EINVAL) {
+ ret = -EAGAIN;
+ break;
+ }
+ info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+ info.ph_ep_info.peripheral_iface_id = gsi->data_id;
+ } else {
+ info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
+ info.ph_ep_info.peripheral_iface_id = gsi->ctrl_id;
}
- info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
- info.ph_ep_info.peripheral_iface_id = gsi->data_id;
- info.ipa_ep_pair.cons_pipe_num =
- (gsi->prot_id == IPA_USB_DIAG) ? -1 :
- gsi->d_port.out_channel_handle;
- info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
-
log_event_dbg("%s: prot id :%d ep_type:%d intf:%d",
__func__, gsi->prot_id, info.ph_ep_info.ep_type,
info.ph_ep_info.peripheral_iface_id);
+ if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+ info.ipa_ep_pair.cons_pipe_num =
+ (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) ? -1 :
+ gsi->d_port.out_channel_handle;
+ info.ipa_ep_pair.prod_pipe_num =
+ gsi->d_port.in_channel_handle;
- log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
- __func__, info.ipa_ep_pair.cons_pipe_num,
- info.ipa_ep_pair.prod_pipe_num);
+
+ log_event_dbg("%s: ipa_cons_idx:%d ipa_prod_idx:%d",
+ __func__,
+ info.ipa_ep_pair.cons_pipe_num,
+ info.ipa_ep_pair.prod_pipe_num);
+ }
ret = copy_to_user((void __user *)arg, &info,
sizeof(info));
@@ -1328,8 +1574,8 @@
static int gsi_function_ctrl_port_init(struct f_gsi *gsi)
{
int ret;
+ char *cdev_name = NULL;
int sz = GSI_CTRL_NAME_LEN;
- bool ctrl_dev_create = true;
INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
@@ -1338,17 +1584,33 @@
init_waitqueue_head(&gsi->c_port.read_wq);
- if (gsi->prot_id == IPA_USB_RMNET)
- strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
- else if (gsi->prot_id == IPA_USB_MBIM)
- strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
- else if (gsi->prot_id == IPA_USB_DIAG)
- strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
- else
- ctrl_dev_create = false;
+ switch (gsi->prot_id) {
+ case USB_PROT_RMNET_IPA:
+ cdev_name = GSI_RMNET_CTRL_NAME;
+ break;
+ case USB_PROT_RMNET_ETHER:
+ cdev_name = ETHER_RMNET_CTRL_NAME;
+ break;
+ case USB_PROT_MBIM_IPA:
+ cdev_name = GSI_MBIM_CTRL_NAME;
+ break;
+ case USB_PROT_DIAG_IPA:
+ cdev_name = GSI_DPL_CTRL_NAME;
+ break;
+ case USB_PROT_DPL_ETHER:
+ cdev_name = ETHER_DPL_CTRL_NAME;
+ break;
+ case USB_PROT_GPS_CTRL:
+ cdev_name = GSI_GPS_CTRL_NAME;
+ break;
+ default:
+ break;
+ }
- if (!ctrl_dev_create)
+ if (!cdev_name)
return 0;
+ else
+ strlcat(gsi->c_port.name, cdev_name, sz);
gsi->c_port.ctrl_device.name = gsi->c_port.name;
gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
@@ -1419,6 +1681,12 @@
} else {
log_event_dbg("%s: posting HOST_READY\n", __func__);
post_event(d_port, EVT_HOST_READY);
+ /*
+ * If host supports flow control with RNDIS_MSG_INIT then
+ * mark the flag to true. This flag will be used further to
+ * enable the flow control on resume path.
+ */
+ gsi->host_supports_flow_control = true;
}
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -1512,7 +1780,7 @@
event->wValue = cpu_to_le16(0);
event->wLength = cpu_to_le16(0);
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
data = req->buf;
data[0] = cpu_to_le32(1);
data[1] = cpu_to_le32(0);
@@ -1737,7 +2005,7 @@
/* read the request; process it later */
value = w_length;
req->context = gsi;
- if (gsi->prot_id == IPA_USB_RNDIS)
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA)
req->complete = gsi_rndis_command_complete;
else
req->complete = gsi_ctrl_cmd_complete;
@@ -1749,7 +2017,7 @@
if (w_value || w_index != id)
goto invalid;
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
/* return the result */
buf = rndis_get_next_response(gsi->params, &n);
if (buf) {
@@ -1785,7 +2053,8 @@
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
line_state = (w_value & GSI_CTRL_DTR ? true : false);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
gsi->rmnet_dtr_status = line_state;
log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
__func__, line_state);
@@ -1882,9 +2151,10 @@
struct f_gsi *gsi = func_to_gsi(f);
/* RNDIS, RMNET and DPL only support alt 0*/
- if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
- gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
+ if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RNDIS_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
return 0;
else if (intf == gsi->data_id)
return gsi->data_interface_up;
@@ -2003,7 +2273,8 @@
log_event_dbg("intf=%u, alt=%u", intf, alt);
/* Control interface has only altsetting 0 */
- if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+ if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER) {
if (alt != 0)
goto fail;
@@ -2037,10 +2308,11 @@
if (intf == gsi->data_id) {
gsi->d_port.net_ready_trigger = false;
/* for rndis and rmnet alt is always 0 update alt accordingly */
- if (gsi->prot_id == IPA_USB_RNDIS ||
- gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
- alt = 1;
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
+ alt = 1;
if (alt > 1)
goto notify_ep_disable;
@@ -2067,8 +2339,9 @@
}
/* Configure EPs for GSI */
- if (gsi->d_port.in_ep) {
- if (gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->d_port.in_ep &&
+ gsi->prot_id <= USB_PROT_DIAG_IPA) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA)
gsi->d_port.in_ep->ep_intr_num = 3;
else
gsi->d_port.in_ep->ep_intr_num = 2;
@@ -2077,7 +2350,8 @@
GSI_EP_OP_CONFIG);
}
- if (gsi->d_port.out_ep) {
+ if (gsi->d_port.out_ep &&
+ gsi->prot_id <= USB_PROT_DIAG_IPA) {
gsi->d_port.out_ep->ep_intr_num = 1;
usb_gsi_ep_op(gsi->d_port.out_ep,
&gsi->d_port.out_request,
@@ -2086,7 +2360,17 @@
gsi->d_port.gadget = cdev->gadget;
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ net = gether_connect(&gsi->d_port.gether_port);
+ if (IS_ERR(net)) {
+ pr_err("%s:gether_connect err:%ld\n",
+ __func__, PTR_ERR(net));
+ goto notify_ep_disable;
+ }
+ gsi->d_port.gether_port.cdc_filter = 0;
+ }
+
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
gsi_rndis_open(gsi);
net = gsi_rndis_get_netdev("rndis0");
if (IS_ERR(net))
@@ -2098,7 +2382,7 @@
&gsi->d_port.cdc_filter);
}
- if (gsi->prot_id == IPA_USB_ECM)
+ if (gsi->prot_id == USB_PROT_ECM_IPA)
gsi->d_port.cdc_filter = DEFAULT_FILTER;
/*
@@ -2106,7 +2390,7 @@
* handler which is invoked when the host sends the
* GEN_CURRENT_PACKET_FILTER message.
*/
- if (gsi->prot_id != IPA_USB_RNDIS)
+ if (gsi->prot_id != USB_PROT_RNDIS_IPA)
post_event(&gsi->d_port,
EVT_CONNECT_IN_PROGRESS);
queue_work(gsi->d_port.ipa_usb_wq,
@@ -2127,7 +2411,8 @@
atomic_set(&gsi->connected, 1);
/* send 0 len pkt to qti to notify state change */
- if (gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER)
gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
return 0;
@@ -2145,10 +2430,11 @@
atomic_set(&gsi->connected, 0);
- if (gsi->prot_id == IPA_USB_RNDIS)
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA)
rndis_uninit(gsi->params);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
gsi->rmnet_dtr_status = false;
/* Disable Control Path */
@@ -2170,7 +2456,15 @@
gsi->data_interface_up = false;
+ gsi->host_supports_flow_control = false;
+
log_event_dbg("%s deactivated", gsi->function.name);
+
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ gether_disconnect(&gsi->d_port.gether_port);
+ return;
+ }
+
ipa_disconnect_handler(&gsi->d_port);
post_event(&gsi->d_port, EVT_DISCONNECTED);
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -2187,6 +2481,11 @@
return;
}
+ if (!gsi->data_interface_up) {
+ log_event_dbg("%s: suspend done\n", __func__);
+ return;
+ }
+
block_db = true;
usb_gsi_ep_op(gsi->d_port.in_ep, (void *)&block_db,
GSI_EP_OP_SET_CLR_BLOCK_DBL);
@@ -2216,13 +2515,20 @@
/* Check any pending cpkt, and queue immediately on resume */
gsi_ctrl_send_notification(gsi);
+ if (!gsi->data_interface_up) {
+ log_event_dbg("%s: resume done\n", __func__);
+ return;
+ }
+
/*
* Linux host does not send RNDIS_MSG_INIT or non-zero
* RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
+ * Check whether host supports flow_control are not. If yes
* Trigger state machine explicitly on resume.
*/
- if (gsi->prot_id == IPA_USB_RNDIS &&
- !usb_gsi_remote_wakeup_allowed(f))
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA &&
+ !usb_gsi_remote_wakeup_allowed(f) &&
+ gsi->host_supports_flow_control)
rndis_flow_control(gsi->params, false);
post_event(&gsi->d_port, EVT_RESUMED);
@@ -2327,7 +2633,7 @@
info->data_nop_desc->bInterfaceNumber = gsi->data_id;
/* allocate instance-specific endpoints */
- if (info->fs_in_desc) {
+ if (info->fs_in_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
ep = usb_ep_autoconfig_by_name(cdev->gadget,
info->fs_in_desc, info->in_epname);
if (!ep)
@@ -2335,9 +2641,17 @@
gsi->d_port.in_ep = ep;
msm_ep_config(gsi->d_port.in_ep, NULL);
ep->driver_data = cdev; /* claim */
+ } else {
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+ if (!ep)
+ goto fail;
+ gsi->d_port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+ }
}
- if (info->fs_out_desc) {
+ if (info->fs_out_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
ep = usb_ep_autoconfig_by_name(cdev->gadget,
info->fs_out_desc, info->out_epname);
if (!ep)
@@ -2345,6 +2659,14 @@
gsi->d_port.out_ep = ep;
msm_ep_config(gsi->d_port.out_ep, NULL);
ep->driver_data = cdev; /* claim */
+ } else {
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+ if (!ep)
+ goto fail;
+ gsi->d_port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+ }
}
if (info->fs_notify_desc) {
@@ -2475,14 +2797,17 @@
struct gsi_function_bind_info info = {0};
struct f_gsi *gsi = func_to_gsi(f);
struct rndis_params *params;
+ struct net_device *net;
+ char *name = NULL;
int status;
__u8 class;
__u8 subclass;
__u8 proto;
- if (gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
gsi->ctrl_id = -ENODEV;
else {
status = gsi->ctrl_id = usb_interface_id(c, f);
@@ -2490,12 +2815,14 @@
goto fail;
}
- status = gsi->data_id = usb_interface_id(c, f);
- if (status < 0)
- goto fail;
+ if (gsi->prot_id != USB_PROT_GPS_CTRL) {
+ status = gsi->data_id = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ }
switch (gsi->prot_id) {
- case IPA_USB_RNDIS:
+ case USB_PROT_RNDIS_IPA:
info.string_defs = rndis_gsi_string_defs;
info.ctrl_desc = &rndis_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2641,7 +2968,7 @@
info.ctrl_desc->bInterfaceProtocol = proto;
break;
- case IPA_USB_MBIM:
+ case USB_PROT_MBIM_IPA:
info.string_defs = mbim_gsi_string_defs;
info.ctrl_desc = &mbim_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2688,7 +3015,8 @@
c->bConfigurationValue + '0';
}
break;
- case IPA_USB_RMNET:
+ case USB_PROT_RMNET_IPA:
+ case USB_PROT_RMNET_ETHER:
info.string_defs = rmnet_gsi_string_defs;
info.data_desc = &rmnet_gsi_interface_desc;
info.data_str_idx = 0;
@@ -2713,8 +3041,9 @@
info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
info.out_req_num_buf = GSI_NUM_OUT_BUFFERS;
info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ name = "usb_rmnet";
break;
- case IPA_USB_ECM:
+ case USB_PROT_ECM_IPA:
info.string_defs = ecm_gsi_string_defs;
info.ctrl_desc = &ecm_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2763,7 +3092,8 @@
gsi->d_port.ipa_init_params.host_ethaddr[5]);
info.string_defs[1].s = gsi->ethaddr;
break;
- case IPA_USB_DIAG:
+ case USB_PROT_DIAG_IPA:
+ case USB_PROT_DPL_ETHER:
info.string_defs = qdss_gsi_string_defs;
info.data_desc = &qdss_gsi_data_intf_desc;
info.data_str_idx = 0;
@@ -2778,6 +3108,19 @@
info.in_req_buf_len = 16384;
info.in_req_num_buf = GSI_NUM_IN_BUFFERS;
info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ name = "dpl_usb";
+ break;
+ case USB_PROT_GPS_CTRL:
+ info.string_defs = gps_string_defs;
+ info.ctrl_str_idx = 0;
+ info.ctrl_desc = &gps_interface_desc;
+ info.fs_notify_desc = &gps_fs_notify_desc;
+ info.hs_notify_desc = &gps_hs_notify_desc;
+ info.ss_notify_desc = &gps_ss_notify_desc;
+ info.fs_desc_hdr = gps_fs_function;
+ info.hs_desc_hdr = gps_hs_function;
+ info.ss_desc_hdr = gps_ss_function;
+ info.notify_buf_len = sizeof(struct usb_cdc_notification);
break;
default:
log_event_err("%s: Invalid prot id %d", __func__,
@@ -2789,6 +3132,32 @@
if (status)
goto dereg_rndis;
+ if (gsi->prot_id == USB_PROT_GPS_CTRL)
+ goto skip_ipa_init;
+
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ if (!name)
+ return -EINVAL;
+
+ gsi->d_port.gether_port.in_ep = gsi->d_port.in_ep;
+ gsi->d_port.gether_port.out_ep = gsi->d_port.out_ep;
+ net = gether_setup_name_default(name);
+ if (IS_ERR(net)) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return PTR_ERR(net);
+ }
+ gsi->d_port.gether_port.ioport = netdev_priv(net);
+ gether_set_gadget(net, c->cdev->gadget);
+ status = gether_register_netdev(net);
+ if (status < 0) {
+ pr_err("%s: gether_register_netdev failed\n",
+ __func__);
+ free_netdev(net);
+ return status;
+ }
+ goto skip_ipa_init;
+ }
+
status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
if (!status) {
log_event_info("%s: ipa is not ready", __func__);
@@ -2814,6 +3183,7 @@
gsi->d_port.sm_state = STATE_INITIALIZED;
+skip_ipa_init:
DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
f->name,
gadget_is_superspeed(c->cdev->gadget) ? "super" :
@@ -2836,6 +3206,12 @@
{
struct f_gsi *gsi = func_to_gsi(f);
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ gether_cleanup(gsi->d_port.gether_port.ioport);
+ gsi->d_port.gether_port.ioport = NULL;
+ goto skip_ipa_dinit;
+ }
+
/*
* Use drain_workqueue to accomplish below conditions:
* 1. Make sure that any running work completed
@@ -2847,12 +3223,13 @@
drain_workqueue(gsi->d_port.ipa_usb_wq);
ipa_usb_deinit_teth_prot(gsi->prot_id);
- if (gsi->prot_id == IPA_USB_RNDIS) {
+skip_ipa_dinit:
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
gsi->d_port.sm_state = STATE_UNINITIALIZED;
rndis_deregister(gsi->params);
}
- if (gsi->prot_id == IPA_USB_MBIM)
+ if (gsi->prot_id == USB_PROT_MBIM_IPA)
mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
if (gadget_is_superspeed(c->cdev->gadget)) {
@@ -2883,33 +3260,38 @@
static int gsi_bind_config(struct f_gsi *gsi)
{
int status = 0;
- enum ipa_usb_teth_prot prot_id = gsi->prot_id;
- log_event_dbg("%s: prot id %d", __func__, prot_id);
+ log_event_dbg("%s: prot id %d", __func__, gsi->prot_id);
- switch (prot_id) {
- case IPA_USB_RNDIS:
+ switch (gsi->prot_id) {
+ case USB_PROT_RNDIS_IPA:
gsi->function.name = "rndis";
gsi->function.strings = rndis_gsi_strings;
break;
- case IPA_USB_ECM:
+ case USB_PROT_ECM_IPA:
gsi->function.name = "cdc_ethernet";
gsi->function.strings = ecm_gsi_strings;
break;
- case IPA_USB_RMNET:
+ case USB_PROT_RMNET_IPA:
+ case USB_PROT_RMNET_ETHER:
gsi->function.name = "rmnet";
gsi->function.strings = rmnet_gsi_strings;
break;
- case IPA_USB_MBIM:
+ case USB_PROT_MBIM_IPA:
gsi->function.name = "mbim";
gsi->function.strings = mbim_gsi_strings;
break;
- case IPA_USB_DIAG:
+ case USB_PROT_DIAG_IPA:
+ case USB_PROT_DPL_ETHER:
gsi->function.name = "dpl";
gsi->function.strings = qdss_gsi_strings;
break;
+ case USB_PROT_GPS_CTRL:
+ gsi->function.name = "gps";
+ gsi->function.strings = gps_strings;
+ break;
default:
- log_event_err("%s: invalid prot id %d", __func__, prot_id);
+ log_event_err("%s: invalid prot id %d", __func__, gsi->prot_id);
return -EINVAL;
}
@@ -3164,7 +3546,7 @@
static int gsi_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int prot_id, name_len, ret = 0;
+ int name_len, prot_id, ret = 0;
struct gsi_opts *opts;
struct f_gsi *gsi;
@@ -3181,7 +3563,7 @@
return -EINVAL;
}
- if (prot_id == IPA_USB_RNDIS)
+ if (prot_id == USB_PROT_RNDIS_IPA)
config_group_init_type_name(&opts->func_inst.group, "",
&gsi_func_rndis_type);
@@ -3249,7 +3631,7 @@
return -ENOMEM;
}
- for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+ for (i = 0; i < USB_PROT_MAX; i++) {
__gsi[i] = gsi_function_init();
if (IS_ERR(__gsi[i]))
return PTR_ERR(__gsi[i]);
@@ -3259,6 +3641,7 @@
if (!ipc_log_ctxt)
pr_err("%s: Err allocating ipc_log_ctxt\n", __func__);
+ usb_gsi_debugfs_init();
return usb_function_register(&gsiusb_func);
}
module_init(fgsi_init);
@@ -3272,9 +3655,10 @@
if (ipc_log_ctxt)
ipc_log_context_destroy(ipc_log_ctxt);
- for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ for (i = 0; i < USB_PROT_MAX; i++)
kfree(__gsi[i]);
+ usb_gsi_debugfs_exit();
usb_function_unregister(&gsiusb_func);
}
module_exit(fgsi_exit);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index c6e64fd..cd146a0 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -26,10 +26,17 @@
#include <linux/debugfs.h>
#include <linux/ipa_usb.h>
#include <linux/ipc_logging.h>
+#include <linux/timer.h>
+
+#include "u_ether.h"
#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
#define GSI_MBIM_CTRL_NAME "android_mbim"
#define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define ETHER_RMNET_CTRL_NAME "rmnet_ctrl0"
+#define ETHER_DPL_CTRL_NAME "dpl_ctrl0"
+#define GSI_GPS_CTRL_NAME "gps"
+
#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
#define GSI_MAX_CTRL_PKT_SIZE 4096
#define GSI_CTRL_DTR (1 << 0)
@@ -114,6 +121,22 @@
RNDIS_ID_MAX,
};
+enum usb_prot_id {
+ /* accelerated: redefined from ipa_usb.h, do not change order */
+ USB_PROT_RNDIS_IPA,
+ USB_PROT_ECM_IPA,
+ USB_PROT_RMNET_IPA,
+ USB_PROT_MBIM_IPA,
+ USB_PROT_DIAG_IPA,
+
+ /* non-accelerated */
+ USB_PROT_RMNET_ETHER,
+ USB_PROT_DPL_ETHER,
+ USB_PROT_GPS_CTRL,
+
+ USB_PROT_MAX,
+};
+
#define MAXQUEUELEN 128
struct event_queue {
u8 event[MAXQUEUELEN];
@@ -228,6 +251,7 @@
enum connection_state sm_state;
struct event_queue evt_q;
wait_queue_head_t wait_for_ipa_ready;
+ struct gether gether_port;
/* Track these for debugfs */
struct ipa_usb_xdci_chan_params ipa_in_channel_params;
@@ -237,7 +261,7 @@
struct f_gsi {
struct usb_function function;
- enum ipa_usb_teth_prot prot_id;
+ enum usb_prot_id prot_id;
int ctrl_id;
int data_id;
u32 vendorID;
@@ -254,6 +278,11 @@
struct gsi_data_port d_port;
struct gsi_ctrl_port c_port;
bool rmnet_dtr_status;
+ /* To test remote wakeup using debugfs */
+ struct timer_list debugfs_rw_timer;
+ u8 debugfs_rw_enable;
+ u16 debugfs_rw_interval;
+ bool host_supports_flow_control;
};
static inline struct f_gsi *func_to_gsi(struct usb_function *f)
@@ -285,21 +314,27 @@
func_inst.group);
}
-static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+static int name_to_prot_id(const char *name)
{
if (!name)
goto error;
if (!strncasecmp(name, "rndis", MAX_INST_NAME_LEN))
- return IPA_USB_RNDIS;
+ return USB_PROT_RNDIS_IPA;
if (!strncasecmp(name, "ecm", MAX_INST_NAME_LEN))
- return IPA_USB_ECM;
+ return USB_PROT_ECM_IPA;
if (!strncasecmp(name, "rmnet", MAX_INST_NAME_LEN))
- return IPA_USB_RMNET;
+ return USB_PROT_RMNET_IPA;
if (!strncasecmp(name, "mbim", MAX_INST_NAME_LEN))
- return IPA_USB_MBIM;
+ return USB_PROT_MBIM_IPA;
if (!strncasecmp(name, "dpl", MAX_INST_NAME_LEN))
- return IPA_USB_DIAG;
+ return USB_PROT_DIAG_IPA;
+ if (!strncasecmp(name, "rmnet.ether", MAX_INST_NAME_LEN))
+ return USB_PROT_RMNET_ETHER;
+ if (!strncasecmp(name, "dpl.ether", MAX_INST_NAME_LEN))
+ return USB_PROT_DPL_ETHER;
+ if (!strncasecmp(name, "gps", MAX_INST_NAME_LEN))
+ return USB_PROT_GPS_CTRL;
error:
return -EINVAL;
@@ -309,6 +344,7 @@
#define LOG2_STATUS_INTERVAL_MSEC 5
#define MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
+#define GPS_MAX_NOTIFY_SIZE 64
/* rmnet device descriptors */
@@ -1399,4 +1435,91 @@
&qdss_gsi_string_table,
NULL,
};
+
+/* gps device descriptor */
+static struct usb_interface_descriptor gps_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
+ /* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor gps_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_descriptor_header *gps_fs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_fs_notify_desc,
+ NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor gps_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_descriptor_header *gps_hs_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_hs_notify_desc,
+ NULL,
+};
+
+/* Super speed support */
+static struct usb_endpoint_descriptor gps_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor gps_ss_notify_comp_desc = {
+ .bLength = sizeof(gps_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(GPS_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_descriptor_header *gps_ss_function[] = {
+ (struct usb_descriptor_header *) &gps_interface_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_desc,
+ (struct usb_descriptor_header *) &gps_ss_notify_comp_desc,
+ NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string gps_string_defs[] = {
+ [0].s = "GPS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings gps_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = gps_string_defs,
+};
+
+static struct usb_gadget_strings *gps_strings[] = {
+ &gps_string_table,
+ NULL,
+};
#endif
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 651776d..3f25946 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -613,7 +613,17 @@
return -EINVAL;
spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
if (dev->ep_out->desc) {
+ if (!cdev) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
if (len > MTP_BULK_BUFFER_SIZE) {
spin_unlock_irq(&dev->lock);
@@ -1498,7 +1508,10 @@
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
mutex_unlock(&dev->read_mutex);
+ spin_lock_irq(&dev->lock);
dev->state = STATE_OFFLINE;
+ dev->cdev = NULL;
+ spin_unlock_irq(&dev->lock);
kfree(f->os_desc_table);
f->os_desc_n = 0;
fi_mtp->func_inst.f = NULL;
@@ -1554,7 +1567,9 @@
struct usb_composite_dev *cdev = dev->cdev;
DBG(cdev, "mtp_function_disable\n");
+ spin_lock_irq(&dev->lock);
dev->state = STATE_OFFLINE;
+ spin_unlock_irq(&dev->lock);
usb_ep_disable(dev->ep_in);
usb_ep_disable(dev->ep_out);
usb_ep_disable(dev->ep_intr);
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 0ce2c407..312ae24 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -1205,7 +1205,7 @@
return &usb_qdss->port.function;
}
-DECLARE_USB_FUNCTION_INIT(qdss, qdss_alloc_inst, qdss_alloc);
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
static int __init usb_qdss_init(void)
{
int ret;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1ce079d..75afa8f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -439,6 +439,39 @@
return -EBUSY;
}
+static int xhci_plat_pm_freeze(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat freeze\n");
+
+ return xhci_suspend(xhci, false);
+}
+
+static int xhci_plat_pm_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat restore\n");
+
+ ret = xhci_resume(xhci, true);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+
+ return ret;
+}
+
static int xhci_plat_runtime_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -470,7 +503,9 @@
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+ .freeze = xhci_plat_pm_freeze,
+ .restore = xhci_plat_pm_restore,
+ .thaw = xhci_plat_pm_restore,
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
xhci_plat_runtime_idle)
};
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fe2bbfb..9cba037 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -947,7 +947,7 @@
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- if (!hcd->state)
+ if (!hcd->state || xhci->suspended)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
@@ -1017,6 +1017,7 @@
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
+ xhci->suspended = true;
return rc;
}
@@ -1036,7 +1037,7 @@
int retval = 0;
bool comp_timer_running = false;
- if (!hcd->state)
+ if (!hcd->state || !xhci->suspended)
return 0;
/* Wait a bit if either of the roothubs need to settle from the
@@ -1173,6 +1174,7 @@
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ xhci->suspended = false;
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4c1f556..a5153ca 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1697,6 +1697,7 @@
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+ bool suspended;
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index e28173b..33a7f6a 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -149,6 +149,7 @@
/* override TUNEX registers value */
struct dentry *root;
u8 tune[5];
+ u8 bias_ctrl2;
struct hrtimer timer;
int soc_min_rev;
@@ -494,6 +495,10 @@
writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+ if (qphy->bias_ctrl2)
+ writel_relaxed(qphy->bias_ctrl2,
+ qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
/* Ensure above write is completed before turning ON ref clk */
wmb();
@@ -585,6 +590,10 @@
writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+ if (qphy->bias_ctrl2)
+ writel_relaxed(qphy->bias_ctrl2,
+ qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
/* ensure above writes are completed before re-enabling PHY */
wmb();
@@ -946,11 +955,21 @@
dev_err(qphy->phy.dev,
"can't create debugfs entry for %s\n", name);
debugfs_remove_recursive(qphy->root);
- ret = ENOMEM;
+ ret = -ENOMEM;
goto create_err;
}
}
+ file = debugfs_create_x8("bias_ctrl2", 0644, qphy->root,
+ &qphy->bias_ctrl2);
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(qphy->phy.dev,
+ "can't create debugfs entry for bias_ctrl2\n");
+ debugfs_remove_recursive(qphy->root);
+ ret = -ENOMEM;
+ goto create_err;
+ }
+
create_err:
return ret;
}
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 9c33c6e..47db12b 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -706,7 +706,9 @@
qphy->base + QUSB2PHY_PORT_INTR_CTRL);
/* Disable PHY */
- writel_relaxed(POWER_DOWN,
+ writel_relaxed(POWER_DOWN |
+ readl_relaxed(qphy->base +
+ QUSB2PHY_PORT_POWERDOWN),
qphy->base + QUSB2PHY_PORT_POWERDOWN);
/* Make sure that above write is completed */
wmb();
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ad3bbf7..1795d24 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -25,10 +25,10 @@
#include <linux/clk.h>
#include <linux/reset.h>
-enum core_ldo_levels {
- CORE_LEVEL_NONE = 0,
- CORE_LEVEL_MIN,
- CORE_LEVEL_MAX,
+enum ldo_levels {
+ VOLTAGE_LEVEL_NONE = 0,
+ VOLTAGE_LEVEL_MIN,
+ VOLTAGE_LEVEL_MAX,
};
#define INIT_MAX_TIME_USEC 1000
@@ -38,6 +38,8 @@
#define USB_SSPHY_1P2_VOL_MAX 1200000 /* uV */
#define USB_SSPHY_HPM_LOAD 23000 /* uA */
+#define USB_SSPHY_LOAD_DEFAULT -1
+
/* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
@@ -119,6 +121,9 @@
int vdd_levels[3]; /* none, low, high */
struct regulator *core_ldo;
int core_voltage_levels[3];
+ struct regulator *fpc_redrive_ldo;
+ int redrive_voltage_levels[3];
+ int redrive_load;
struct clk *ref_clk_src;
struct clk *ref_clk;
struct clk *aux_clk;
@@ -226,6 +231,33 @@
}
}
+static int msm_ldo_enable(struct msm_ssphy_qmp *phy,
+ struct regulator *ldo, int *voltage_levels, int load)
+{
+ int ret = 0;
+
+ dev_dbg(phy->phy.dev,
+ "ldo: min_vol:%duV max_vol:%duV\n",
+ voltage_levels[VOLTAGE_LEVEL_MIN],
+ voltage_levels[VOLTAGE_LEVEL_MAX]);
+
+ if (load > 0) {
+ ret = regulator_set_load(ldo, load);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = regulator_set_voltage(ldo,
+ voltage_levels[VOLTAGE_LEVEL_MIN],
+ voltage_levels[VOLTAGE_LEVEL_MAX]);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(ldo);
+
+ return ret;
+}
+
static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
{
int min, rc = 0;
@@ -245,74 +277,65 @@
if (!on)
goto disable_regulators;
- rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
- phy->vdd_levels[2]);
- if (rc) {
- dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
- return rc;
+ if (phy->fpc_redrive_ldo) {
+ rc = msm_ldo_enable(phy, phy->fpc_redrive_ldo,
+ phy->redrive_voltage_levels,
+ phy->redrive_load);
+ if (rc < 0) {
+ dev_err(phy->phy.dev,
+ "enable phy->fpc_redrive_ldo failed\n");
+ return rc;
+ }
+
+ dev_dbg(phy->phy.dev,
+ "fpc redrive ldo: min_vol:%duV max_vol:%duV\n",
+ phy->redrive_voltage_levels[VOLTAGE_LEVEL_MIN],
+ phy->redrive_voltage_levels[VOLTAGE_LEVEL_MAX]);
}
- dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
- phy->vdd_levels[min], phy->vdd_levels[2]);
-
- rc = regulator_enable(phy->vdd);
- if (rc) {
- dev_err(phy->phy.dev,
- "regulator_enable(phy->vdd) failed, ret=%d",
- rc);
- goto unconfig_vdd;
- }
-
- rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
+ rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels,
+ USB_SSPHY_LOAD_DEFAULT);
if (rc < 0) {
- dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
+ dev_err(phy->phy.dev, "enable phy->vdd failed\n");
+ goto disable_fpc_redrive;
+ }
+
+ dev_dbg(phy->phy.dev,
+ "vdd ldo: min_vol:%duV max_vol:%duV\n",
+ phy->vdd_levels[VOLTAGE_LEVEL_MIN],
+ phy->vdd_levels[VOLTAGE_LEVEL_MAX]);
+
+ rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels,
+ USB_SSPHY_HPM_LOAD);
+ if (rc < 0) {
+ dev_err(phy->phy.dev, "enable phy->core_ldo failed\n");
goto disable_vdd;
}
- rc = regulator_set_voltage(phy->core_ldo,
- phy->core_voltage_levels[CORE_LEVEL_MIN],
- phy->core_voltage_levels[CORE_LEVEL_MAX]);
- if (rc) {
- dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
- goto put_core_ldo_lpm;
- }
-
- rc = regulator_enable(phy->core_ldo);
- if (rc) {
- dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
- goto unset_core_ldo;
- }
+ dev_dbg(phy->phy.dev,
+ "core ldo: min_vol:%duV max_vol:%duV\n",
+ phy->core_voltage_levels[VOLTAGE_LEVEL_MIN],
+ phy->core_voltage_levels[VOLTAGE_LEVEL_MAX]);
return 0;
disable_regulators:
rc = regulator_disable(phy->core_ldo);
if (rc)
- dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
-
-unset_core_ldo:
- rc = regulator_set_voltage(phy->core_ldo,
- phy->core_voltage_levels[CORE_LEVEL_NONE],
- phy->core_voltage_levels[CORE_LEVEL_MAX]);
- if (rc)
- dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
-
-put_core_ldo_lpm:
- rc = regulator_set_load(phy->core_ldo, 0);
- if (rc < 0)
- dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+ dev_err(phy->phy.dev, "disable phy->core_ldo failed\n");
disable_vdd:
rc = regulator_disable(phy->vdd);
if (rc)
- dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
- rc);
+ dev_err(phy->phy.dev, "disable phy->vdd failed\n");
-unconfig_vdd:
- rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
- phy->vdd_levels[2]);
- if (rc)
- dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+disable_fpc_redrive:
+ if (phy->fpc_redrive_ldo) {
+ rc = regulator_disable(phy->fpc_redrive_ldo);
+ if (rc)
+ dev_err(phy->phy.dev,
+ "disable phy->fpc_redrive_ldo failed\n");
+ }
return rc < 0 ? rc : 0;
}
@@ -634,6 +657,9 @@
if (suspend) {
if (phy->cable_connected)
msm_ssusb_qmp_enable_autonomous(phy, 1);
+ else
+ writel_relaxed(0x00,
+ phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
/* Make sure above write completed with PHY */
wmb();
@@ -944,9 +970,9 @@
}
/* Set default core voltage values */
- phy->core_voltage_levels[CORE_LEVEL_NONE] = 0;
- phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
- phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+ phy->core_voltage_levels[VOLTAGE_LEVEL_NONE] = 0;
+ phy->core_voltage_levels[VOLTAGE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+ phy->core_voltage_levels[VOLTAGE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
len == sizeof(phy->core_voltage_levels)) {
@@ -990,6 +1016,39 @@
goto err;
}
+ phy->fpc_redrive_ldo = devm_regulator_get_optional(dev, "fpc-redrive");
+ if (IS_ERR(phy->fpc_redrive_ldo)) {
+ phy->fpc_redrive_ldo = NULL;
+ dev_dbg(dev, "no FPC re-drive ldo regulator\n");
+ } else {
+ if (of_get_property(dev->of_node,
+ "qcom,redrive-voltage-level", &len) &&
+ len == sizeof(phy->redrive_voltage_levels)) {
+ ret = of_property_read_u32_array(dev->of_node,
+ "qcom,redrive-voltage-level",
+ (u32 *) phy->redrive_voltage_levels,
+ len / sizeof(u32));
+ if (ret) {
+ dev_err(dev,
+ "err qcom,redrive-voltage-level\n");
+ goto err;
+ }
+ } else {
+ ret = -EINVAL;
+ dev_err(dev, "err inputs for redrive-voltage-level\n");
+ goto err;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "qcom,redrive-load",
+ &phy->redrive_load);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to read redrive load\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "Get FPC re-drive ldo regulator\n");
+ }
+
platform_set_drvdata(pdev, phy);
if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 39086bf..1550cae 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -213,6 +213,7 @@
bool enable_streaming;
bool enable_axi_prefetch;
bool vbus_low_as_hostmode;
+ bool phy_id_high_as_peripheral;
};
#define SDP_CHECK_DELAY_MS 10000 /* in ms */
@@ -257,11 +258,21 @@
MODULE_PARM_DESC(lpm_disconnect_thresh,
"Delay before entering LPM on USB disconnect");
+static bool floated_charger_enable;
+module_param(floated_charger_enable, bool, 0644);
+MODULE_PARM_DESC(floated_charger_enable,
+ "Whether to enable floated charger");
+
/* by default debugging is enabled */
static unsigned int enable_dbg_log = 1;
module_param(enable_dbg_log, uint, 0644);
MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
+/* Max current to be drawn for DCP charger */
+static int dcp_max_current = IDEV_CHG_MAX;
+module_param(dcp_max_current, int, 0644);
+MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+
static struct msm_otg *the_msm_otg;
static bool debug_bus_voting_enabled;
@@ -1324,6 +1335,7 @@
struct msm_otg_platform_data *pdata = motg->pdata;
int cnt;
bool host_bus_suspend, device_bus_suspend, sm_work_busy;
+ bool host_pc_charger;
u32 cmd_val;
u32 portsc, config2;
u32 func_ctrl;
@@ -1351,6 +1363,9 @@
if (host_bus_suspend)
msm_otg_perf_vote_update(motg, false);
+ host_pc_charger = (motg->chg_type == USB_SDP_CHARGER) ||
+ (motg->chg_type == USB_CDP_CHARGER);
+
/* !BSV, but its handling is in progress by otg sm_work */
sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
phy->otg->state == OTG_STATE_B_PERIPHERAL;
@@ -1377,8 +1392,8 @@
* Don't abort suspend in case of dcp detected by PMIC
*/
- if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend) ||
- sm_work_busy) {
+ if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
+ host_pc_charger) || sm_work_busy) {
msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
motg->inputs, 0);
enable_irq(motg->irq);
@@ -1824,7 +1839,49 @@
return pval.intval;
}
-static void msm_otg_notify_chg_current(struct msm_otg *motg, unsigned int mA)
+static int msm_otg_notify_chg_type(struct msm_otg *motg)
+{
+ static int charger_type;
+ union power_supply_propval propval;
+ int ret = 0;
+ /*
+ * TODO
+ * Unify OTG driver charger types and power supply charger types
+ */
+ if (charger_type == motg->chg_type)
+ return 0;
+
+ if (motg->chg_type == USB_SDP_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB;
+ else if (motg->chg_type == USB_CDP_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB_CDP;
+ else if (motg->chg_type == USB_DCP_CHARGER ||
+ motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB_DCP;
+ else if (motg->chg_type == USB_FLOATED_CHARGER)
+ charger_type = POWER_SUPPLY_TYPE_USB_FLOAT;
+ else
+ charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+ if (!psy) {
+ dev_err(motg->phy.dev, "no usb power supply registered\n");
+ return -ENODEV;
+ }
+
+ pr_debug("Trying to set usb power supply type %d\n", charger_type);
+
+ propval.intval = charger_type;
+ ret = power_supply_set_property(psy, POWER_SUPPLY_PROP_REAL_TYPE,
+ &propval);
+ if (ret)
+ dev_dbg(motg->phy.dev, "power supply error when setting property\n");
+
+ msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
+ motg->chg_type, charger_type);
+ return ret;
+}
+
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
{
struct usb_gadget *g = motg->phy.otg->gadget;
union power_supply_propval pval = {0};
@@ -1833,6 +1890,12 @@
if (g && g->is_a_peripheral)
return;
+ dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
+
+ if (msm_otg_notify_chg_type(motg))
+ dev_dbg(motg->phy.dev, "Failed notifying %d charger type to PMIC\n",
+ motg->chg_type);
+
psy_type = get_psy_type(motg);
if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
if (!mA)
@@ -1842,9 +1905,7 @@
goto set_prop;
}
- dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA);
-
- if (motg->cur_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+ if (motg->cur_power == mA)
return;
dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
@@ -1863,15 +1924,12 @@
motg->cur_power = mA;
}
-static void msm_otg_notify_chg_current_work(struct work_struct *w)
+static void msm_otg_notify_charger_work(struct work_struct *w)
{
struct msm_otg *motg = container_of(w,
- struct msm_otg, notify_chg_current_work);
- /*
- * Gadget driver uses set_power method to notify about the
- * available current based on suspend/configured states.
- */
- msm_otg_notify_chg_current(motg, motg->notify_current_mA);
+ struct msm_otg, notify_charger_work);
+
+ msm_otg_notify_charger(motg, motg->notify_current_mA);
}
static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
@@ -1879,7 +1937,14 @@
struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
motg->notify_current_mA = mA;
- schedule_work(&motg->notify_chg_current_work);
+ /*
+ * Gadget driver uses set_power method to notify about the
+ * available current based on suspend/configured states.
+ */
+ if (motg->chg_type == USB_SDP_CHARGER ||
+ get_psy_type(motg) == POWER_SUPPLY_TYPE_USB ||
+ get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
+ queue_work(motg->otg_wq, &motg->notify_charger_work);
return 0;
}
@@ -2280,6 +2345,301 @@
return true;
}
+static bool msm_chg_check_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+
+ chg_det = ulpi_read(phy, 0x87);
+
+ return (chg_det & 1);
+}
+
+static void msm_chg_enable_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ /*
+ * Configure DM as current source, DP as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x8, 0x85);
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+ bool ret = false;
+
+ chg_det = ulpi_read(phy, 0x87);
+ ret = chg_det & 1;
+ /* Turn off VDP_SRC */
+ ulpi_write(phy, 0x3, 0x86);
+ msleep(20);
+
+ return ret;
+}
+
+static void msm_chg_enable_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ /*
+ * Configure DP as current source, DM as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+}
+
+static bool msm_chg_check_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 line_state;
+
+ line_state = ulpi_read(phy, 0x87);
+
+ return line_state & 2;
+}
+
+static void msm_chg_disable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ ulpi_write(phy, 0x10, 0x86);
+ /*
+ * Disable the Rdm_down after
+ * the DCD is completed.
+ */
+ ulpi_write(phy, 0x04, 0x0C);
+}
+
+static void msm_chg_enable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+
+ /*
+ * Idp_src and Rdm_down are de-coupled
+ * on Femto PHY. If Idp_src alone is
+ * enabled, DCD timeout is observed with
+ * wall charger. But a genuine DCD timeout
+ * may be incorrectly interpreted. Also
+ * BC1.2 compliance testers expect Rdm_down
+ * to enabled during DCD. Enable Rdm_down
+ * explicitly before enabling the DCD.
+ */
+ ulpi_write(phy, 0x04, 0x0B);
+ ulpi_write(phy, 0x10, 0x85);
+}
+
+static void msm_chg_block_on(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl;
+
+ /* put the controller in non-driving mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+
+ /* disable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xC);
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x1F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ udelay(100);
+}
+
+static void msm_chg_block_off(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl;
+
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x3F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ /* re-enable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xB);
+
+ /* put the controller in normal mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+}
+
+#define MSM_CHG_DCD_TIMEOUT (750 * HZ/1000) /* 750 msec */
+#define MSM_CHG_DCD_POLL_TIME (50 * HZ/1000) /* 50 msec */
+#define MSM_CHG_PRIMARY_DET_TIME (50 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME (50 * HZ/1000) /* TVDMSRC_ON */
+
+static void msm_chg_detect_work(struct work_struct *w)
+{
+ struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
+ struct usb_phy *phy = &motg->phy;
+ bool is_dcd = false, tmout, vout, queue_sm_work = false;
+ static bool dcd;
+ u32 line_state, dm_vlgc;
+ unsigned long delay = 0;
+
+ dev_dbg(phy->dev, "chg detection work\n");
+ msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
+ motg->chg_state, get_pm_runtime_counter(phy->dev));
+
+ switch (motg->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ pm_runtime_get_sync(phy->dev);
+ msm_chg_block_on(motg);
+ case USB_CHG_STATE_IN_PROGRESS:
+ if (!motg->vbus_state) {
+ motg->chg_state = USB_CHG_STATE_UNDEFINED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ msm_chg_block_off(motg);
+ pm_runtime_put_sync(phy->dev);
+ return;
+ }
+
+ msm_chg_enable_dcd(motg);
+ motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+ motg->dcd_time = 0;
+ delay = MSM_CHG_DCD_POLL_TIME;
+ break;
+ case USB_CHG_STATE_WAIT_FOR_DCD:
+ if (!motg->vbus_state) {
+ motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+ break;
+ }
+
+ is_dcd = msm_chg_check_dcd(motg);
+ motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
+ tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
+ if (is_dcd || tmout) {
+ if (is_dcd)
+ dcd = true;
+ else
+ dcd = false;
+ msm_chg_disable_dcd(motg);
+ msm_chg_enable_primary_det(motg);
+ delay = MSM_CHG_PRIMARY_DET_TIME;
+ motg->chg_state = USB_CHG_STATE_DCD_DONE;
+ } else {
+ delay = MSM_CHG_DCD_POLL_TIME;
+ }
+ break;
+ case USB_CHG_STATE_DCD_DONE:
+ if (!motg->vbus_state) {
+ motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+ break;
+ }
+
+ vout = msm_chg_check_primary_det(motg);
+ line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+ dm_vlgc = line_state & PORTSC_LS_DM;
+ if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
+ if (line_state) { /* DP > VLGC */
+ motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ } else {
+ msm_chg_enable_secondary_det(motg);
+ delay = MSM_CHG_SECONDARY_DET_TIME;
+ motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+ }
+ } else { /* DM < VDAT_REF || DM > VLGC */
+ if (line_state) /* DP > VLGC or/and DM > VLGC */
+ motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+ else if (!dcd && floated_charger_enable)
+ motg->chg_type = USB_FLOATED_CHARGER;
+ else
+ motg->chg_type = USB_SDP_CHARGER;
+
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ }
+ break;
+ case USB_CHG_STATE_PRIMARY_DONE:
+ if (!motg->vbus_state) {
+ motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+ break;
+ }
+
+ vout = msm_chg_check_secondary_det(motg);
+ if (vout)
+ motg->chg_type = USB_DCP_CHARGER;
+ else
+ motg->chg_type = USB_CDP_CHARGER;
+ motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
+ /* fall through */
+ case USB_CHG_STATE_SECONDARY_DONE:
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ case USB_CHG_STATE_DETECTED:
+ if (!motg->vbus_state) {
+ motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
+ break;
+ }
+
+ msm_chg_block_off(motg);
+
+ /* Enable VDP_SRC in case of DCP charger */
+ if (motg->chg_type == USB_DCP_CHARGER) {
+ ulpi_write(phy, 0x2, 0x85);
+ msm_otg_notify_charger(motg, dcp_max_current);
+ } else if (motg->chg_type == USB_NONCOMPLIANT_CHARGER)
+ msm_otg_notify_charger(motg, dcp_max_current);
+ else if (motg->chg_type == USB_FLOATED_CHARGER ||
+ motg->chg_type == USB_CDP_CHARGER)
+ msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+
+ msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
+ motg->chg_type, get_pm_runtime_counter(phy->dev));
+ /* to match _get at the start of chg_det_work */
+ pm_runtime_mark_last_busy(phy->dev);
+ pm_runtime_put_autosuspend(phy->dev);
+ motg->chg_state = USB_CHG_STATE_QUEUE_SM_WORK;
+ break;
+ case USB_CHG_STATE_QUEUE_SM_WORK:
+ if (!motg->vbus_state) {
+ pm_runtime_get_sync(phy->dev);
+ /* Turn off VDP_SRC if charger is DCP type */
+ if (motg->chg_type == USB_DCP_CHARGER)
+ ulpi_write(phy, 0x2, 0x86);
+
+ motg->chg_state = USB_CHG_STATE_UNDEFINED;
+ if (motg->chg_type == USB_SDP_CHARGER ||
+ motg->chg_type == USB_CDP_CHARGER)
+ queue_sm_work = true;
+
+ motg->chg_type = USB_INVALID_CHARGER;
+ msm_otg_notify_charger(motg, 0);
+ motg->cur_power = 0;
+ msm_chg_block_off(motg);
+ pm_runtime_mark_last_busy(phy->dev);
+ pm_runtime_put_autosuspend(phy->dev);
+ if (queue_sm_work)
+ queue_work(motg->otg_wq, &motg->sm_work);
+ else
+ return;
+ }
+
+ if (motg->chg_type == USB_CDP_CHARGER ||
+ motg->chg_type == USB_SDP_CHARGER)
+ queue_work(motg->otg_wq, &motg->sm_work);
+
+ return;
+ default:
+ return;
+ }
+
+ msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
+ queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
+}
+
/*
* We support OTG, Peripheral only and Host only configurations. In case
* of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
@@ -2325,10 +2685,17 @@
else
clear_bit(ID, &motg->inputs);
} else if (motg->phy_irq) {
- if (msm_otg_read_phy_id_state(motg))
+ if (msm_otg_read_phy_id_state(motg)) {
set_bit(ID, &motg->inputs);
- else
+ if (pdata->phy_id_high_as_peripheral)
+ set_bit(B_SESS_VLD,
+ &motg->inputs);
+ } else {
clear_bit(ID, &motg->inputs);
+ if (pdata->phy_id_high_as_peripheral)
+ clear_bit(B_SESS_VLD,
+ &motg->inputs);
+ }
}
}
break;
@@ -2374,32 +2741,31 @@
static void msm_otg_sm_work(struct work_struct *w)
{
struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+ struct usb_phy *phy = &motg->phy;
struct usb_otg *otg = motg->phy.otg;
struct device *dev = otg->usb_phy->dev;
bool work = 0;
int ret;
pr_debug("%s work\n", usb_otg_state_string(otg->state));
- msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
- otg->state, motg->inputs);
+ msm_otg_dbg_log_event(phy, "SM WORK:", otg->state, motg->inputs);
/* Just resume h/w if reqd, pm_count is handled based on state/inputs */
if (motg->resume_pending) {
- pm_runtime_get_sync(otg->usb_phy->dev);
+ pm_runtime_get_sync(dev);
if (atomic_read(&motg->in_lpm)) {
dev_err(dev, "SM WORK: USB is in LPM\n");
- msm_otg_dbg_log_event(&motg->phy,
- "SM WORK: USB IS IN LPM",
+ msm_otg_dbg_log_event(phy, "SM WORK: USB IS IN LPM",
otg->state, motg->inputs);
msm_otg_resume(motg);
}
motg->resume_pending = false;
- pm_runtime_put_noidle(otg->usb_phy->dev);
+ pm_runtime_put_noidle(dev);
}
switch (otg->state) {
case OTG_STATE_UNDEFINED:
- pm_runtime_get_sync(otg->usb_phy->dev);
+ pm_runtime_get_sync(dev);
msm_otg_reset(otg->usb_phy);
/* Add child device only after block reset */
ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
@@ -2411,21 +2777,20 @@
otg->state = OTG_STATE_B_IDLE;
if (!test_bit(B_SESS_VLD, &motg->inputs) &&
test_bit(ID, &motg->inputs)) {
- msm_otg_dbg_log_event(&motg->phy,
- "PM RUNTIME: UNDEF PUT",
- get_pm_runtime_counter(otg->usb_phy->dev), 0);
- pm_runtime_put_sync(otg->usb_phy->dev);
+ msm_otg_dbg_log_event(phy, "PM RUNTIME: UNDEF PUT",
+ get_pm_runtime_counter(dev), 0);
+ pm_runtime_put_sync(dev);
break;
}
- pm_runtime_put(otg->usb_phy->dev);
+ pm_runtime_put(dev);
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
if (!test_bit(ID, &motg->inputs) && otg->host) {
pr_debug("!id\n");
- msm_otg_dbg_log_event(&motg->phy, "!ID",
+ msm_otg_dbg_log_event(phy, "!ID",
motg->inputs, otg->state);
if (!otg->host) {
- msm_otg_dbg_log_event(&motg->phy,
+ msm_otg_dbg_log_event(phy,
"SM WORK: Host Not Set",
otg->state, motg->inputs);
break;
@@ -2435,10 +2800,10 @@
otg->state = OTG_STATE_A_HOST;
} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
pr_debug("b_sess_vld\n");
- msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
+ msm_otg_dbg_log_event(phy, "B_SESS_VLD",
motg->inputs, otg->state);
if (!otg->gadget) {
- msm_otg_dbg_log_event(&motg->phy,
+ msm_otg_dbg_log_event(phy,
"SM WORK: Gadget Not Set",
otg->state, motg->inputs);
break;
@@ -2446,25 +2811,24 @@
if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
queue_delayed_work(motg->otg_wq,
- &motg->sdp_check,
- msecs_to_jiffies(SDP_CHECK_DELAY_MS));
+ &motg->sdp_check,
+ msecs_to_jiffies(SDP_CHECK_DELAY_MS));
pm_runtime_get_sync(otg->usb_phy->dev);
msm_otg_start_peripheral(otg, 1);
otg->state = OTG_STATE_B_PERIPHERAL;
} else {
pr_debug("Cable disconnected\n");
- msm_otg_dbg_log_event(&motg->phy, "RT: Cable DISC",
- get_pm_runtime_counter(otg->usb_phy->dev), 0);
-
- msm_otg_notify_chg_current(motg, 0);
+ msm_otg_dbg_log_event(phy, "RT: Cable DISC",
+ get_pm_runtime_counter(dev), 0);
+ msm_otg_notify_charger(motg, 0);
}
break;
case OTG_STATE_B_PERIPHERAL:
if (!test_bit(B_SESS_VLD, &motg->inputs)) {
cancel_delayed_work_sync(&motg->sdp_check);
msm_otg_start_peripheral(otg, 0);
- msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
+ msm_otg_dbg_log_event(phy, "RT PM: B_PERI A PUT",
get_pm_runtime_counter(dev), 0);
/* _put for _get done on cable connect in B_IDLE */
pm_runtime_mark_last_busy(dev);
@@ -2474,8 +2838,7 @@
work = 1;
} else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
pr_debug("a_bus_suspend\n");
- msm_otg_dbg_log_event(&motg->phy,
- "BUS_SUSPEND: PM RT PUT",
+ msm_otg_dbg_log_event(phy, "BUS_SUSPEND: PM RT PUT",
get_pm_runtime_counter(dev), 0);
otg->state = OTG_STATE_B_SUSPEND;
/* _get on connect in B_IDLE or host resume in B_SUSP */
@@ -2493,8 +2856,7 @@
} else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
pr_debug("!a_bus_suspend\n");
otg->state = OTG_STATE_B_PERIPHERAL;
- msm_otg_dbg_log_event(&motg->phy,
- "BUS_RESUME: PM RT GET",
+ msm_otg_dbg_log_event(phy, "BUS_RESUME: PM RT GET",
get_pm_runtime_counter(dev), 0);
pm_runtime_get_sync(dev);
}
@@ -2611,7 +2973,16 @@
else
set_bit(ID, &motg->inputs);
}
- msm_otg_kick_sm_work(motg);
+
+ if (test_bit(B_SESS_VLD, &motg->inputs) &&
+ get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN &&
+ !motg->chg_detection)
+ motg->chg_detection = true;
+
+ if (motg->chg_detection)
+ queue_delayed_work(motg->otg_wq, &motg->chg_work, 0);
+ else
+ msm_otg_kick_sm_work(motg);
}
static void msm_id_status_w(struct work_struct *w)
@@ -2637,6 +3008,8 @@
gpio_direction_input(motg->pdata->switch_sel_gpio);
if (!test_and_set_bit(ID, &motg->inputs)) {
pr_debug("ID set\n");
+ if (motg->pdata->phy_id_high_as_peripheral)
+ set_bit(B_SESS_VLD, &motg->inputs);
msm_otg_dbg_log_event(&motg->phy, "ID SET",
motg->inputs, motg->phy.otg->state);
work = 1;
@@ -2646,6 +3019,8 @@
gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
if (test_and_clear_bit(ID, &motg->inputs)) {
pr_debug("ID clear\n");
+ if (motg->pdata->phy_id_high_as_peripheral)
+ clear_bit(B_SESS_VLD, &motg->inputs);
msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
motg->inputs, motg->phy.otg->state);
work = 1;
@@ -3424,6 +3799,10 @@
pdata->vbus_low_as_hostmode = of_property_read_bool(node,
"qcom,vbus-low-as-hostmode");
+
+ pdata->phy_id_high_as_peripheral = of_property_read_bool(node,
+ "qcom,phy-id-high-as-peripheral");
+
return pdata;
}
@@ -3865,11 +4244,11 @@
motg->id_state = USB_ID_FLOAT;
set_bit(ID, &motg->inputs);
INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+ INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
INIT_DELAYED_WORK(&motg->sdp_check, check_for_sdp_connection);
- INIT_WORK(&motg->notify_chg_current_work,
- msm_otg_notify_chg_current_work);
+ INIT_WORK(&motg->notify_charger_work, msm_otg_notify_charger_work);
motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
if (!motg->otg_wq) {
pr_err("%s: Unable to create workqueue otg_wq\n",
@@ -4220,12 +4599,13 @@
if (psy)
power_supply_put(psy);
msm_otg_debugfs_cleanup();
+ cancel_delayed_work_sync(&motg->chg_work);
cancel_delayed_work_sync(&motg->sdp_check);
cancel_delayed_work_sync(&motg->id_status_work);
cancel_delayed_work_sync(&motg->perf_vote_work);
msm_otg_perf_vote_update(motg, false);
cancel_work_sync(&motg->sm_work);
- cancel_work_sync(&motg->notify_chg_current_work);
+ cancel_work_sync(&motg->notify_charger_work);
destroy_workqueue(motg->otg_wq);
pm_runtime_resume(&pdev->dev);
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index 2d13210..50b232a 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -174,6 +174,7 @@
return;
mutex_lock(&session->lock);
+ MDSS_XLOG(0x111);
if (session->vsync_enabled ||
atomic_read(&session->vsync_countdown) > 0) {
mutex_unlock(&session->lock);
@@ -182,10 +183,17 @@
return;
}
+ if (!session->clk_on) {
+ mutex_unlock(&session->lock);
+ pr_debug("%s: Clk shut down is done\n", __func__);
+ MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+ return;
+ }
if (session->intf->active) {
retry_dma_done:
rc = wait_for_completion_timeout(&session->dma_completion,
WAIT_DMA_TIMEOUT);
+ MDSS_XLOG(0x222);
if (rc <= 0) {
struct mdss_panel_data *panel;
@@ -196,6 +204,7 @@
if (--retry_count) {
pr_err("dmap is busy, retry %d\n",
retry_count);
+ MDSS_XLOG(__LINE__, retry_count);
goto retry_dma_done;
}
pr_err("dmap is still busy, bug_on\n");
@@ -380,7 +389,7 @@
return -EFAULT;
p_req = p + sizeof(req_list_header);
count = req_list_header.count;
- if (count < 0 || count >= MAX_BLIT_REQ)
+ if (count < 0 || count > MAX_BLIT_REQ)
return -EINVAL;
rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1);
if (!rc)
@@ -399,7 +408,7 @@
return -EFAULT;
p_req = p + sizeof(struct mdp_blit_req_list);
count = req_list_header.count;
- if (count < 0 || count >= MAX_BLIT_REQ)
+ if (count < 0 || count > MAX_BLIT_REQ)
return -EINVAL;
req_list_header.sync.acq_fen_fd_cnt = 0;
rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0);
@@ -661,8 +670,11 @@
case MDP_RGB_888:
format = MDP3_DMA_IBUF_FORMAT_RGB888;
break;
+ case MDP_XRGB_8888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
+ case MDP_BGRA_8888:
+ case MDP_RGBX_8888:
format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
break;
default:
@@ -1014,8 +1026,10 @@
MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
mfd->panel_power_state);
panel = mdp3_session->panel;
- mutex_lock(&mdp3_session->lock);
+ cancel_work_sync(&mdp3_session->clk_off_work);
+ mutex_lock(&mdp3_session->lock);
+ MDSS_XLOG(0x111);
pr_debug("Requested power state = %d\n", mfd->panel_power_state);
if (mdss_fb_is_power_on_lp(mfd)) {
/*
@@ -1482,6 +1496,7 @@
}
mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+ cancel_work_sync(&mdp3_session->clk_off_work);
mutex_lock(&mdp3_session->lock);
if (!mdp3_session->status) {
@@ -1489,7 +1504,7 @@
mutex_unlock(&mdp3_session->lock);
return -EPERM;
}
-
+ MDSS_XLOG(0x111);
mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
data = mdp3_bufq_pop(&mdp3_session->bufq_in);
if (data) {
@@ -1521,7 +1536,7 @@
}
}
mdp3_session->dma_active = 1;
- init_completion(&mdp3_session->dma_completion);
+ reinit_completion(&mdp3_session->dma_completion);
mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
mdp3_bufq_push(&mdp3_session->bufq_out, data);
}
@@ -1670,7 +1685,7 @@
}
}
mdp3_session->dma_active = 1;
- init_completion(&mdp3_session->dma_completion);
+ reinit_completion(&mdp3_session->dma_completion);
mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
} else {
pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
@@ -2711,6 +2726,8 @@
}
mutex_unlock(&mdp3_res->fs_idle_pc_lock);
rc = mdp3_ctrl_async_blit_req(mfd, argp);
+ if (!rc)
+ cancel_work_sync(&mdp3_session->clk_off_work);
break;
case MSMFB_BLIT:
mutex_lock(&mdp3_res->fs_idle_pc_lock);
@@ -2718,6 +2735,8 @@
mdp3_ctrl_reset(mfd);
mutex_unlock(&mdp3_res->fs_idle_pc_lock);
rc = mdp3_ctrl_blit_req(mfd, argp);
+ if (!rc)
+ cancel_work_sync(&mdp3_session->clk_off_work);
break;
case MSMFB_METADATA_GET:
rc = copy_from_user(&metadata, argp, sizeof(metadata));
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index 089d32d..71fcbf9 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -665,6 +665,7 @@
ATRACE_BEGIN(__func__);
pr_debug("mdp3_dmap_update\n");
+ MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
if (intf->active) {
@@ -757,6 +758,7 @@
unsigned long flag;
int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+ MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
if (intf->active)
@@ -965,6 +967,8 @@
val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
pr_err("%s DMAP Status %s\n", __func__,
(val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE");
+ MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__,
+ (val & MDP3_DMA_P_BUSY_BIT) ? 1:0);
return val & MDP3_DMA_P_BUSY_BIT;
}
@@ -1056,7 +1060,7 @@
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
- init_completion(&dma->dma_comp);
+ reinit_completion(&dma->dma_comp);
dma->vsync_client.handler = NULL;
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index 6095073..3391059 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -1462,6 +1462,18 @@
(!(check_if_rgb(bg_req.src.format))) &&
(!(hw_woraround_active))) {
/*
+ * Disable SMART blit for BG(YUV) layer when
+ * Scaling on BG layer
+ * Rotation on BG layer
+ * UD flip on BG layer
+ */
+ if ((is_scaling_needed(bg_req)) && (
+ bg_req.flags & MDP_ROT_90) &&
+ (bg_req.flags & MDP_FLIP_UD)) {
+ pr_debug("YUV layer with ROT+UD_FLIP+Scaling Not supported\n");
+ return false;
+ }
+ /*
* swap blit requests at index 0 and 1. YUV layer at
* index 0 is replaced with UI layer request present
* at index 1. Since UI layer will be in background
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 5e921ff..2ff2951 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -685,6 +685,7 @@
u32 mask, u32 val);
int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state);
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_debug_bus_init(struct mdss_dsi_data *sdata);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index f425620..e221c1c 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -82,6 +82,8 @@
static struct mdss_dsi_event dsi_event;
static int dsi_event_thread(void *data);
+static void dsi_send_events(struct mdss_dsi_ctrl_pdata *ctrl,
+ u32 events, u32 arg);
void mdss_dsi_ctrl_init(struct device *ctrl_dev,
struct mdss_dsi_ctrl_pdata *ctrl)
@@ -820,8 +822,10 @@
* Disable PHY contention detection and receive.
* Configure the strength ctrl 1 register.
*/
- MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
- MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+ if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+ MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
+ MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);
+ }
data0 = MIPI_INP(ctrl0->ctrl_base + 0x0004);
data1 = MIPI_INP(ctrl1->ctrl_base + 0x0004);
@@ -876,6 +880,18 @@
udelay(u_dly);
}
if (i == loop) {
+ if ((ctrl0->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+ (event == DSI_EV_LP_RX_TIMEOUT)) {
+ struct mdss_panel_info *pinfo =
+ &ctrl0->panel_data.panel_info;
+ /* If ESD is not enabled, report panel dead */
+ if (!pinfo->esd_check_enabled &&
+ ctrl0->recovery)
+ ctrl0->recovery->fxn(
+ ctrl0->recovery->data,
+ MDP_INTF_DSI_PANEL_DEAD);
+ return;
+ }
MDSS_XLOG(ctrl0->ndx, ln0, 0x1f1f);
MDSS_XLOG(ctrl1->ndx, ln1, 0x1f1f);
pr_err("%s: Clock lane still in stop state\n",
@@ -896,13 +912,20 @@
* Enable PHY contention detection and receive.
* Configure the strength ctrl 1 register.
*/
- MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
- MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+ if (ctrl0->shared_data->phy_rev != DSI_PHY_REV_12NM) {
+ MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
+ MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
+ }
/*
* Add sufficient delay to make sure
* pixel transmission as started
*/
udelay(200);
+ /* Un-mask LP_RX_TIMEOUT error if recovery successful */
+ if (event == DSI_EV_LP_RX_TIMEOUT) {
+ mdss_dsi_set_reg(ctrl0, 0x10c, BIT(5), 0);
+ mdss_dsi_set_reg(ctrl1, 0x10c, BIT(5), 0);
+ }
} else {
if (ctrl->recovery) {
rc = ctrl->recovery->fxn(ctrl->recovery->data,
@@ -914,7 +937,8 @@
}
}
/* Disable PHY contention detection and receive */
- MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
+ if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+ MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);
data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
/* Disable DSI video mode */
@@ -955,6 +979,17 @@
udelay(u_dly);
}
if (i == loop) {
+ if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM) &&
+ (event == DSI_EV_LP_RX_TIMEOUT)) {
+ struct mdss_panel_info *pinfo =
+ &ctrl->panel_data.panel_info;
+ /* If ESD is not enabled, report panel dead */
+ if (!pinfo->esd_check_enabled && ctrl->recovery)
+ ctrl->recovery->fxn(
+ ctrl->recovery->data,
+ MDP_INTF_DSI_PANEL_DEAD);
+ return;
+ }
MDSS_XLOG(ctrl->ndx, ln0, 0x1f1f);
pr_err("%s: Clock lane still in stop state\n",
__func__);
@@ -968,12 +1003,16 @@
/* Enable Video mode for DSI controller */
MIPI_OUTP(ctrl->ctrl_base + 0x004, data0);
/* Enable PHY contention detection and receiver */
- MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
+ if (ctrl->shared_data->phy_rev != DSI_PHY_REV_12NM)
+ MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
/*
* Add sufficient delay to make sure
* pixel transmission as started
*/
udelay(200);
+ /* Un-mask LP_RX_TIMEOUT error if recovery successful */
+ if (event == DSI_EV_LP_RX_TIMEOUT)
+ mdss_dsi_set_reg(ctrl, 0x10c, BIT(5), 0);
}
pr_debug("Recovery done\n");
}
@@ -1513,15 +1552,45 @@
ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
DSI_BTA_EVENT_TIMEOUT);
if (ret <= 0) {
- mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
- pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+ u32 reg_val, status;
+
+ reg_val = MIPI_INP(ctrl_pdata->ctrl_base + 0x0110);
+ status = reg_val & DSI_INTR_BTA_DONE;
+ if (status) {
+ reg_val &= DSI_INTR_MASK_ALL;
+ /* clear BTA_DONE isr only */
+ reg_val |= DSI_INTR_BTA_DONE;
+ MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0110, reg_val);
+ mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
+ complete(&ctrl_pdata->bta_comp);
+ ret = 1;
+ pr_warn("%s: bta done but irq not triggered\n",
+ __func__);
+ } else {
+ pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+ /*
+ * For 12nm DSI PHY, BTA_TO interrupt may not trigger.
+ * Treat software timer timeout as BTA_TO.
+ */
+ if (ctrl_pdata->shared_data->phy_rev ==
+ DSI_PHY_REV_12NM) {
+ /* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+ mdss_dsi_set_reg(ctrl_pdata, 0x10c,
+ (BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
+ dsi_send_events(ctrl_pdata,
+ DSI_EV_LP_RX_TIMEOUT, 0);
+ }
+ ret = -ETIMEDOUT;
+ }
}
if (ignore_underflow) {
+ u32 data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x10c);
/* clear pending overflow status */
mdss_dsi_set_reg(ctrl_pdata, 0xc, 0xffffffff, 0x44440000);
- /* restore overflow isr */
- mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
+ /* restore overflow isr if LP_RX_TO not masked*/
+ if (!(data & BIT(5)))
+ mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
}
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -2164,10 +2233,12 @@
if (mctrl && mctrl->dma_addr) {
if (ignored) {
+ u32 data = MIPI_INP((mctrl->ctrl_base) + 0x10c);
/* clear pending overflow status */
mdss_dsi_set_reg(mctrl, 0xc, 0xffffffff, 0x44440000);
- /* restore overflow isr */
- mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
+ /* restore overflow isr if LP_RX_TO not masked*/
+ if (!(data & BIT(5)))
+ mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
}
if (mctrl->dmap_iommu_map) {
mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
@@ -2185,10 +2256,12 @@
}
if (ignored) {
+ u32 data = MIPI_INP((ctrl->ctrl_base) + 0x10c);
/* clear pending overflow status */
mdss_dsi_set_reg(ctrl, 0xc, 0xffffffff, 0x44440000);
- /* restore overflow isr */
- mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
+ /* restore overflow isr if LP_RX_TO/BTA_TO not masked*/
+ if (!(data & BIT(5)))
+ mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
}
ctrl->dma_addr = 0;
ctrl->dma_size = 0;
@@ -3024,8 +3097,12 @@
if (status & 0x0111) {
MIPI_OUTP(base + 0x00c0, status);
- if (status & 0x0110)
+ if (status & 0x0110) {
+ /* Mask BTA_TIMEOUT/LP_RX_TIMEOUT error */
+ mdss_dsi_set_reg(ctrl, 0x10c,
+ (BIT(5) | BIT(7)), (BIT(5) | BIT(7)));
dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
+ }
pr_err("%s: status=%x\n", __func__, status);
ret = true;
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index e586667..85ed42e 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -24,6 +24,7 @@
#include <linux/string.h>
#include "mdss_dsi.h"
+#include "mdss_debug.h"
#ifdef TARGET_HW_MDSS_HDMI
#include "mdss_dba_utils.h"
#endif
@@ -260,11 +261,12 @@
ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
+ pr_info("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
if (ctrl->idle == enable)
return;
+ MDSS_XLOG(ctrl->idle, enable);
if (enable) {
if (ctrl->idle_on_cmds.cmd_cnt) {
mdss_dsi_panel_cmds_send(ctrl, &ctrl->idle_on_cmds,
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
index 9bd2c4d..ec179e8 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy_12nm.c
@@ -107,6 +107,7 @@
{
DSI_PHY_W32(ctrl->phy_io.base, SYS_CTRL, BIT(0) | BIT(3));
wmb(); /* make sure DSI PHY is disabled */
+ mdss_dsi_ctrl_phy_reset(ctrl);
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 6024ea1..9332f49 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -955,6 +955,11 @@
mdata->dbg_bus_size = 0;
switch (mdata->mdp_rev) {
+ case MDSS_MDP_HW_REV_114:
+ case MDSS_MDP_HW_REV_116:
+ mdata->dbg_bus = dbg_bus_8996;
+ mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_8996);
+ break;
case MDSS_MDP_HW_REV_107:
case MDSS_MDP_HW_REV_107_1:
case MDSS_MDP_HW_REV_107_2:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index b49e954..c002ba4 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -1135,16 +1135,21 @@
return -EINVAL;
/*
- * Currently, only intf_fifo_underflow is
+ * Currently, intf_fifo_overflow is not
* supported for recovery sequence for command
* mode DSI interface
*/
- if (event != MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
+ if (event == MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
pr_warn("%s: unsupported recovery event:%d\n",
__func__, event);
return -EPERM;
}
+ if (event == MDP_INTF_DSI_PANEL_DEAD) {
+ mdss_fb_report_panel_dead(ctx->ctl->mfd);
+ return 0;
+ }
+
if (atomic_read(&ctx->koff_cnt)) {
mdss_mdp_ctl_reset(ctx->ctl, true);
reset_done = true;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index f18987c..591fa38 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -327,11 +327,11 @@
}
/*
- * Currently, only intf_fifo_overflow is
+ * Currently, intf_fifo_underflow is not
* supported for recovery sequence for video
* mode DSI interface
*/
- if (event != MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
+ if (event == MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
pr_warn("%s: unsupported recovery event:%d\n",
__func__, event);
return -EPERM;
@@ -341,6 +341,11 @@
pr_debug("%s: ctl num = %d, event = %d\n",
__func__, ctl->num, event);
+ if (event == MDP_INTF_DSI_PANEL_DEAD) {
+ mdss_fb_report_panel_dead(ctx->ctl->mfd);
+ return 0;
+ }
+
pinfo = &ctl->panel_data->panel_info;
clk_rate = ((ctl->intf_type == MDSS_INTF_DSI) ?
pinfo->mipi.dsi_pclk_rate :
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index c9e7e61..aa90d5f 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -167,6 +167,7 @@
#define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW 0x0001
#define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW 0x0002
+#define MDP_INTF_DSI_PANEL_DEAD 0x0003
enum {
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 972c8de..4ff8e90 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -693,13 +693,13 @@
}
static struct mdss_smmu_domain mdss_mdp_unsec = {
- "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
+ "mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_rot_unsec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_4G - SZ_128M)};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128M, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_mdp_sec = {
- "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
+ "mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
static struct mdss_smmu_domain mdss_rot_sec = {
- NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128K, (SZ_4G - SZ_128M)};
+ NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_128M, (SZ_4G - SZ_128M)};
static const struct of_device_id mdss_smmu_dt_match[] = {
{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 153b39f..9ccd428 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -417,7 +417,7 @@
wmb(); /* make sure phy timings are updated*/
}
-static void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
{
/* start phy sw reset */
MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0001);
@@ -519,8 +519,7 @@
* is only done from the clock master. This will ensure that the PLL is
* off when PHY reset is called.
*/
- if (mdss_dsi_is_ctrl_clk_slave(ctrl) ||
- (ctrl->shared_data->phy_rev == DSI_PHY_REV_12NM))
+ if (mdss_dsi_is_ctrl_clk_slave(ctrl))
return;
mdss_dsi_phy_sw_reset_sub(ctrl);
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d8f496..3a8064d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1455,12 +1455,48 @@
return 0;
}
+static void __evict_bh_lru(void *arg)
+{
+ struct bh_lru *b = &get_cpu_var(bh_lrus);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh) {
+ brelse(b->bhs[i]);
+ b->bhs[i] = NULL;
+ goto out;
+ }
+ }
+out:
+ put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+ struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh)
+ return 1;
+ }
+
+ return 0;
+
+}
void invalidate_bh_lrus(void)
{
on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
+static void evict_bh_lrus(struct buffer_head *bh)
+{
+ on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
@@ -3250,8 +3286,15 @@
do {
if (buffer_write_io_error(bh) && page->mapping)
mapping_set_error(page->mapping, -EIO);
- if (buffer_busy(bh))
- goto failed;
+ if (buffer_busy(bh)) {
+ /*
+ * Check if the busy failure was due to an
+ * outstanding LRU reference
+ */
+ evict_bh_lrus(bh);
+ if (buffer_busy(bh))
+ goto failed;
+ }
bh = bh->b_this_page;
} while (bh != head);
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..cc42e5e 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/f2fs
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index d7b4c48..c629e97 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -32,14 +32,18 @@
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_page(page->mapping->host, page,
- PAGE_SIZE, 0, page->index);
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else if (done) {
+ if (fscrypt_using_hardware_encryption(page->mapping->host)) {
SetPageUptodate(page);
+ } else {
+ int ret = fscrypt_decrypt_page(page->mapping->host,
+ page, PAGE_SIZE, 0, page->index);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else if (done) {
+ SetPageUptodate(page);
+ }
}
if (done)
unlock_page(page);
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
new file mode 100644
index 0000000..62dae83
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "fscrypt_ice.h"
+
+int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ return S_ISREG(inode->i_mode) && ci &&
+ ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (!ci)
+ return 0;
+
+ return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2)
+{
+ char *key1 = NULL;
+ char *key2 = NULL;
+ char *salt1 = NULL;
+ char *salt2 = NULL;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ if (inode1 == inode2)
+ return true;
+
+ /* both do not belong to ice, so we don't care, they are equal
+	 * for us
+ */
+ if (!fscrypt_should_be_processed_by_ice(inode1) &&
+ !fscrypt_should_be_processed_by_ice(inode2))
+ return true;
+
+ /* one belongs to ice, the other does not -> not equal */
+ if (fscrypt_should_be_processed_by_ice(inode1) ^
+ fscrypt_should_be_processed_by_ice(inode2))
+ return false;
+
+ key1 = fscrypt_get_ice_encryption_key(inode1);
+ key2 = fscrypt_get_ice_encryption_key(inode2);
+ salt1 = fscrypt_get_ice_encryption_salt(inode1);
+ salt2 = fscrypt_get_ice_encryption_salt(inode2);
+
+ /* key and salt should not be null by this point */
+ if (!key1 || !key2 || !salt1 || !salt2 ||
+ (fscrypt_get_ice_encryption_key_size(inode1) !=
+ fscrypt_get_ice_encryption_key_size(inode2)) ||
+ (fscrypt_get_ice_encryption_salt_size(inode1) !=
+ fscrypt_get_ice_encryption_salt_size(inode2)))
+ return false;
+
+ if ((memcmp(key1, key2,
+ fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
+ (memcmp(salt1, salt2,
+ fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
+ return true;
+
+ return false;
+}
+
+void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
+{
+ if (fscrypt_should_be_processed_by_ice(inode))
+ bio->bi_iter.bi_dun = dun;
+}
+EXPORT_SYMBOL(fscrypt_set_ice_dun);
+
+/*
+ * This function will be used for filesystem when deciding to merge bios.
+ * Basic assumption is, if inline_encryption is set, single bio has to
+ * guarantee consecutive LBAs as well as ino|pg->index.
+ */
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+{
+ if (!bio)
+ return true;
+
+ /* if both of them are not encrypted, no further check is needed */
+ if (!bio_dun(bio) && !bio_encrypted)
+ return true;
+
+ /* ICE allows only consecutive iv_key stream. */
+ return bio_end_dun(bio) == dun;
+}
+EXPORT_SYMBOL(fscrypt_mergeable_bio);
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
new file mode 100644
index 0000000..c540506
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _FSCRYPT_ICE_H
+#define _FSCRYPT_ICE_H
+
+#include <linux/blkdev.h>
+#include "fscrypt_private.h"
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+ if (!inode->i_sb->s_cop)
+ return 0;
+ if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+ return 0;
+
+ return fscrypt_using_hardware_encryption(inode);
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+ return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
+}
+
+int fscrypt_is_aes_xts_cipher(const struct inode *inode);
+
+char *fscrypt_get_ice_encryption_key(const struct inode *inode);
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
+
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2);
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+#else
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+ return 0;
+}
+
+static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int fscrypt_is_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline bool fscrypt_is_ice_encryption_info_equal(
+ const struct inode *inode1,
+ const struct inode *inode2)
+{
+ return 0;
+}
+
+static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _FSCRYPT_ICE_H */
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index d36a648..8f73c1d 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -11,9 +11,12 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
+#ifndef __FS_HAS_ENCRYPTION
#define __FS_HAS_ENCRYPTION 1
+#endif
#include <linux/fscrypt.h>
#include <crypto/hash.h>
+#include <linux/pfk.h>
/* Encryption parameters */
#define FS_IV_SIZE 16
@@ -58,17 +61,25 @@
char encrypted_path[1];
} __packed;
+enum ci_mode_info {
+ CI_NONE_MODE = 0,
+ CI_DATA_MODE,
+ CI_FNAME_MODE,
+};
+
/*
* A pointer to this structure is stored in the file system's in-core
* representation of an inode.
*/
struct fscrypt_info {
+ u8 ci_mode;
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
struct crypto_cipher *ci_essiv_tfm;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+ u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
typedef enum {
@@ -90,9 +101,18 @@
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
+ if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE)
+ return true;
+
return false;
}
+static inline bool is_private_data_mode(struct fscrypt_info *ci)
+{
+ return ci->ci_mode == CI_DATA_MODE &&
+ ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index aae68c0..d8478e7 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -15,6 +15,7 @@
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
+#include "fscrypt_ice.h"
static struct crypto_shash *essiv_hash_tfm;
@@ -67,7 +68,7 @@
}
static int validate_user_key(struct fscrypt_info *crypt_info,
- struct fscrypt_context *ctx, u8 *raw_key,
+ struct fscrypt_context *ctx,
const char *prefix, int min_keysize)
{
char *description;
@@ -115,7 +116,24 @@
res = -ENOKEY;
goto out;
}
- res = derive_key_aes(ctx->nonce, master_key, raw_key);
+ res = derive_key_aes(ctx->nonce, master_key, crypt_info->ci_raw_key);
+ /* If we don't need to derive, we still want to do everything
+ * up until now to validate the key. It's cleaner to fail now
+ * than to fail in block I/O.
+ if (!is_private_data_mode(crypt_info)) {
+ res = derive_key_aes(ctx->nonce, master_key,
+ crypt_info->ci_raw_key);
+ } else {
+ * Inline encryption: no key derivation required because IVs are
+ * assigned based on iv_sector.
+
+ BUILD_BUG_ON(sizeof(crypt_info->ci_raw_key) !=
+ sizeof(master_key->raw));
+ memcpy(crypt_info->ci_raw_key,
+ master_key->raw, sizeof(crypt_info->ci_raw_key));
+ res = 0;
+ }
+ */
out:
up_read(&keyring_key->sem);
key_put(keyring_key);
@@ -134,10 +152,12 @@
FS_AES_128_CBC_KEY_SIZE },
[FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
FS_AES_128_CTS_KEY_SIZE },
+ [FS_ENCRYPTION_MODE_PRIVATE] = { "bugon",
+ FS_AES_256_XTS_KEY_SIZE },
};
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
- const char **cipher_str_ret, int *keysize_ret)
+ const char **cipher_str_ret, int *keysize_ret, int *fname)
{
u32 mode;
@@ -149,9 +169,12 @@
}
if (S_ISREG(inode->i_mode)) {
+ ci->ci_mode = CI_DATA_MODE;
mode = ci->ci_data_mode;
} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ ci->ci_mode = CI_FNAME_MODE;
mode = ci->ci_filename_mode;
+ *fname = 1;
} else {
WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
inode->i_ino, (inode->i_mode & S_IFMT));
@@ -170,6 +193,7 @@
crypto_free_skcipher(ci->ci_ctfm);
crypto_free_cipher(ci->ci_essiv_tfm);
+ memset(ci, 0, sizeof(*ci)); /* sanitizes ->ci_raw_key */
kmem_cache_free(fscrypt_info_cachep, ci);
}
@@ -239,6 +263,12 @@
crypto_free_shash(essiv_hash_tfm);
}
+static int fscrypt_data_encryption_mode(struct inode *inode)
+{
+ return fscrypt_should_be_processed_by_ice(inode) ?
+ FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@@ -246,8 +276,8 @@
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
- u8 *raw_key = NULL;
int res;
+ int fname = 0;
if (inode->i_crypt_info)
return 0;
@@ -264,7 +294,8 @@
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.contents_encryption_mode =
+ fscrypt_data_encryption_mode(inode);
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
} else if (res != sizeof(ctx)) {
@@ -289,7 +320,8 @@
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
- res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+ &fname);
if (res)
goto out;
@@ -298,14 +330,11 @@
* crypto API as part of key derivation.
*/
res = -ENOMEM;
- raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
- if (!raw_key)
- goto out;
- res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+ res = validate_user_key(crypt_info, &ctx, FS_KEY_DESC_PREFIX,
keysize);
if (res && inode->i_sb->s_cop->key_prefix) {
- int res2 = validate_user_key(crypt_info, &ctx, raw_key,
+ int res2 = validate_user_key(crypt_info, &ctx,
inode->i_sb->s_cop->key_prefix,
keysize);
if (res2) {
@@ -313,9 +342,23 @@
res = -ENOKEY;
goto out;
}
+ res = 0;
} else if (res) {
goto out;
}
+
+ if (is_private_data_mode(crypt_info)) {
+ if (!fscrypt_is_ice_capable(inode->i_sb)) {
+ pr_warn("%s: ICE support not available\n",
+ __func__);
+ res = -EINVAL;
+ goto out;
+ }
+ /* Let's encrypt/decrypt by ICE */
+ goto do_ice;
+ }
+
+
ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
if (!ctfm || IS_ERR(ctfm)) {
res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
@@ -330,26 +373,29 @@
* if the provided key is longer than keysize, we use the first
* keysize bytes of the derived key only
*/
- res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+ res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key, keysize);
if (res)
goto out;
if (S_ISREG(inode->i_mode) &&
crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
- res = init_essiv_generator(crypt_info, raw_key, keysize);
+ res = init_essiv_generator(crypt_info, crypt_info->ci_raw_key,
+ keysize);
if (res) {
pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
__func__, res, inode->i_ino);
goto out;
}
}
+ memzero_explicit(crypt_info->ci_raw_key,
+ sizeof(crypt_info->ci_raw_key));
+do_ice:
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
if (res == -ENOKEY)
res = 0;
put_crypt_info(crypt_info);
- kzfree(raw_key);
return res;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
+ bio->bi_dio_inode = dio->inode;
dio->bio_bdev = bio->bi_bdev;
if (sdio->submit_io) {
@@ -424,6 +425,18 @@
sdio->logical_offset_in_bio = 0;
}
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+ struct inode *inode = NULL;
+
+ if (bio == NULL)
+ return NULL;
+
+ inode = bio->bi_dio_inode;
+
+ return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
/*
* Release any resources in case of a failure
*/
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
decrypted pages in the page cache.
config EXT4_FS_ENCRYPTION
- bool
- default y
+ bool "Ext4 FS Encryption"
+ default n
depends on EXT4_ENCRYPTION
+config EXT4_FS_ICE_ENCRYPTION
+ bool "Ext4 Encryption with ICE support"
+ default n
+ depends on EXT4_FS_ENCRYPTION
+ depends on PFK
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..b9dfa0d 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the linux ext4-filesystem routines.
#
+ccflags-y += -Ifs/crypto
obj-$(CONFIG_EXT4_FS) += ext4.o
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2bf83d0..ecee29a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
+//#include "ext4_ice.h"
#include <trace/events/ext4.h>
#include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode);
+ S_ISREG(inode->i_mode) &&
+ !fscrypt_using_hardware_encryption(inode);
}
}
/*
@@ -3509,8 +3511,9 @@
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+ WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+ && !fscrypt_using_hardware_encryption(inode));
#endif
if (IS_DAX(inode)) {
ret = dax_do_io(iocb, inode, iter, get_block_func,
@@ -3631,8 +3634,9 @@
ssize_t ret;
int rw = iov_iter_rw(iter);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+ && !fscrypt_using_hardware_encryption(inode))
return 0;
#endif
@@ -3828,7 +3832,8 @@
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode)) {
+ ext4_encrypted_inode(inode) &&
+ !fscrypt_using_hardware_encryption(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 15ca15c..3585e26 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -774,8 +774,8 @@
return ext4_ext_precache(inode);
case EXT4_IOC_SET_ENCRYPTION_POLICY:
- if (!ext4_has_feature_encrypt(sb))
- return -EOPNOTSUPP;
+// if (!ext4_has_feature_encrypt(sb))
+// return -EOPNOTSUPP;
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0718a86..8d4ec1a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -28,6 +28,7 @@
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+//#include "ext4_ice.h"
static struct kmem_cache *io_end_cachep;
@@ -469,6 +470,7 @@
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
+ if (!fscrypt_using_hardware_encryption(inode))
data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
page->index, gfp_flags);
if (IS_ERR(data_page)) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e95b6e1..23c5716 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1180,6 +1180,11 @@
EXT4_NAME_LEN;
}
+static inline bool ext4_is_encrypted(struct inode *inode)
+{
+ return ext4_encrypted_inode(inode);
+}
+
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1187,6 +1192,7 @@
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = ext4_max_namelen,
+ .is_encrypted = ext4_is_encrypted,
};
#endif
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b1fd4e2..66b069b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -437,6 +437,7 @@
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode = fio->page->mapping->host;
verify_block_addr(fio, fio->new_blkaddr);
trace_f2fs_submit_page_bio(page, fio);
@@ -446,6 +447,9 @@
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
1, is_read_io(fio->op), fio->type, fio->temp);
+ if (f2fs_may_encrypt_bio(inode, fio))
+ fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
@@ -465,6 +469,9 @@
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
+ struct inode *inode;
+ bool bio_encrypted;
+ u64 dun;
int err = 0;
f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -488,6 +495,9 @@
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ inode = fio->page->mapping->host;
+ dun = PG_DUN(inode, fio->page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
/* set submitted = true as a return value */
fio->submitted = true;
@@ -498,6 +508,11 @@
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
+
+ /* ICE support */
+ if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+ __submit_merged_bio(io);
+
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
@@ -509,6 +524,9 @@
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false,
fio->type, fio->temp);
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, io->bio, dun);
+
io->fio = *fio;
}
@@ -575,6 +593,9 @@
if (IS_ERR(bio))
return PTR_ERR(bio);
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
@@ -1434,6 +1455,8 @@
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
+ bool bio_encrypted;
+ u64 dun;
map.m_pblk = 0;
map.m_lblk = 0;
@@ -1511,6 +1534,14 @@
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
+
+ dun = PG_DUN(inode, page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+ if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ bio = NULL;
+ }
+
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
@@ -1518,7 +1549,8 @@
goto set_error_page;
}
}
-
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, bio, dun);
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
@@ -1588,6 +1620,9 @@
f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
+ if (fscrypt_using_hardware_encryption(inode))
+ return 0;
+
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
if (!IS_ERR(fio->encrypted_page))
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 84be1e7..c65b697 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3321,9 +3321,20 @@
static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
{
- return (f2fs_post_read_required(inode) ||
+ return ((f2fs_post_read_required(inode) &&
+ !fscrypt_using_hardware_encryption(inode)) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+ if (fio && (fio->type != DATA || fio->encrypted_page))
+ return false;
+
+ return (f2fs_encrypted_file(inode) &&
+ fscrypt_using_hardware_encryption(inode));
+}
+
#endif
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7d4621a..771b039 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1936,6 +1936,11 @@
inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}
+static inline bool f2fs_is_encrypted(struct inode *inode)
+{
+ return f2fs_encrypted_file(inode);
+}
+
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
@@ -1943,6 +1948,7 @@
.dummy_context = f2fs_dummy_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
+ .is_encrypted = f2fs_is_encrypted,
};
#endif
diff --git a/fs/namei.c b/fs/namei.c
index a5a05d3..c138ab1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2905,6 +2905,11 @@
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
+ if (error)
+ return error;
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3720,6 +3725,13 @@
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/include/dt-bindings/arm/arm-smmu.h b/include/dt-bindings/arm/arm-smmu.h
index 1de45a9..3a1dbd3 100644
--- a/include/dt-bindings/arm/arm-smmu.h
+++ b/include/dt-bindings/arm/arm-smmu.h
@@ -23,6 +23,5 @@
#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
#define ARM_SMMU_OPT_STATIC_CB (1 << 8)
#define ARM_SMMU_OPT_HALT (1 << 9)
-#define ARM_SMMU_OPT_HIBERNATION (1 << 10)
#endif
diff --git a/include/dt-bindings/clock/msm-clocks-8952.h b/include/dt-bindings/clock/msm-clocks-8952.h
index 3190d4f..e66c5ed 100644
--- a/include/dt-bindings/clock/msm-clocks-8952.h
+++ b/include/dt-bindings/clock/msm-clocks-8952.h
@@ -237,8 +237,10 @@
#define clk_dsi0pll_byte_clk_src 0xbbaa30be
#define clk_dsi0pll_pixel_clk_src 0x45b3260f
+#define clk_dsi0pll_vco_clk 0x15940d40
#define clk_dsi1pll_byte_clk_src 0x63930a8f
#define clk_dsi1pll_pixel_clk_src 0x0e4c9b56
+#define clk_dsi1pll_vco_clk 0x99797b50
#define clk_dsi_pll0_byte_clk_src 0x44539836
#define clk_dsi_pll0_pixel_clk_src 0x5767c287
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 97cb48f..0885c9f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,6 +61,9 @@
((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#define bio_dun(bio) ((bio)->bi_iter.bi_dun)
+#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio))
/*
* Check whether this bio carries any data or not. A NULL bio is allowed.
@@ -169,6 +172,11 @@
{
iter->bi_sector += bytes >> 9;
+#ifdef CONFIG_PFK
+ if (iter->bi_dun)
+ iter->bi_dun += bytes >> 12;
+#endif
+
if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
else
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e4d84d3..5a3712c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -66,6 +66,10 @@
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
};
+#ifdef CONFIG_PFK
+ /* Encryption key to use (NULL if none) */
+ const struct blk_encryption_key *bi_crypt_key;
+#endif
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -82,6 +86,12 @@
struct bio_set *bi_pool;
/*
+	 * When using direct-io (O_DIRECT), we can't get the inode from a bio
+ * by walking bio->bi_io_vec->bv_page->mapping->host
+ * since the page is anon.
+ */
+ struct inode *bi_dio_inode;
+ /*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b0f981a..661dcec 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -509,6 +509,7 @@
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_FAST 27 /* fast block device (e.g. ram based) */
+#define QUEUE_FLAG_INLINECRYPT 28 /* inline encryption support */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -600,6 +601,8 @@
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_fast(q) test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
+#define blk_queue_inlinecrypt(q) \
+ test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 89b65b8..dbf1f2c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -41,6 +41,7 @@
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
+ u64 bi_dun; /* DUN setting for bio */
};
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8a3bdad..1c4da43 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2948,6 +2948,8 @@
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+struct inode *dio_bio_get_inode(struct bio *bio);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 9e535af..8f4e90c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -18,6 +18,14 @@
#define FS_CRYPTO_BLOCK_SIZE 16
struct fscrypt_ctx;
+
+/* iv sector for security/pfe/pfk_fscrypt.c and f2fs. sizeof is required
+ * to accommodate 32 bit targets.
+ */
+#define PG_DUN(i, p) \
+ ((((i)->i_ino & 0xffffffff) << (sizeof((i)->i_ino)/2)) | \
+ ((p)->index & 0xffffffff))
+
struct fscrypt_info;
struct fscrypt_str {
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 6f97714..ce4df7a 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -183,6 +183,21 @@
return -EOPNOTSUPP;
}
+/* fscrypt_ice.c */
+static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline void fscrypt_set_ice_dun(const struct inode *inode,
+ struct bio *bio, u64 dun){}
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ sector_t iv_block, bool bio_encrypted)
+{
+ return true;
+}
+
/* hooks.c */
static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 1ed79ee..32e2b6c 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -29,6 +29,7 @@
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned (*max_namelen)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
};
struct fscrypt_ctx {
@@ -195,6 +196,13 @@
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
+/* fscrypt_ice.c */
+extern int fscrypt_using_hardware_encryption(const struct inode *inode);
+extern void fscrypt_set_ice_dun(const struct inode *inode,
+ struct bio *bio, u64 dun);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+
+
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c122409..914eb4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -451,6 +451,8 @@
}
void gic_show_pending_irqs(void);
+void gic_v3_dist_save(void);
+void gic_v3_dist_restore(void);
unsigned int get_gic_highpri_irq(void);
#endif
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index be65c03..f510c68 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1452,6 +1452,8 @@
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
+ int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1750,6 +1752,7 @@
struct list_head inode_free_security;
struct list_head inode_init_security;
struct list_head inode_create;
+ struct list_head inode_post_create;
struct list_head inode_link;
struct list_head inode_unlink;
struct list_head inode_symlink;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7228bcd..9a20d3c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -652,6 +652,8 @@
void *cmdq_private;
struct mmc_request *err_mrq;
+ bool inlinecrypt_support; /* Inline encryption support */
+
atomic_t rpmb_req_pending;
struct mutex rpmb_req_mutex;
unsigned long private[0] ____cacheline_aligned;
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 81e7e75..3b1e0e7 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -12,6 +12,7 @@
#ifndef MSM_GSI_H
#define MSM_GSI_H
#include <linux/types.h>
+#include <linux/interrupt.h>
enum gsi_ver {
GSI_VER_ERR = 0,
@@ -82,6 +83,9 @@
* @irq: IRQ number
* @phys_addr: physical address of GSI block
* @size: register size of GSI block
+ * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
+ * @emulator_intcntrlr_size: the size of emulator_intcntrlr_addr
+ * @emulator_intcntrlr_client_isr: client's isr. Called by the emulator's isr
* @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
* @mhi_er_id_limits: MHI event ring start and end ids
* @notify_cb: general notification callback
@@ -107,6 +111,9 @@
unsigned int irq;
phys_addr_t phys_addr;
unsigned long size;
+ phys_addr_t emulator_intcntrlr_addr;
+ unsigned long emulator_intcntrlr_size;
+ irq_handler_t emulator_intcntrlr_client_isr;
bool mhi_er_id_limits_valid;
uint32_t mhi_er_id_limits[2];
void (*notify_cb)(struct gsi_per_notify *notify);
@@ -1285,7 +1292,8 @@
return -GSI_STATUS_UNSUPPORTED_OP;
}
-static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+static inline int gsi_enable_fw(
+ phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb1..e44e9a3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -39,7 +39,9 @@
struct property *next;
unsigned long _flags;
unsigned int unique_id;
+#if defined(CONFIG_OF_KOBJ)
struct bin_attribute attr;
+#endif
};
#if defined(CONFIG_SPARC)
@@ -58,7 +60,9 @@
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
+#if defined(CONFIG_OF_KOBJ)
struct kobject kobj;
+#endif
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
@@ -102,21 +106,17 @@
extern struct kobj_type of_node_ktype;
static inline void of_node_init(struct device_node *node)
{
+#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
+#endif
node->fwnode.type = FWNODE_OF;
}
-/* true when node is initialized */
-static inline int of_node_is_initialized(struct device_node *node)
-{
- return node && node->kobj.state_initialized;
-}
-
-/* true when node is attached (i.e. present on sysfs) */
-static inline int of_node_is_attached(struct device_node *node)
-{
- return node && node->kobj.state_in_sysfs;
-}
+#if defined(CONFIG_OF_KOBJ)
+#define of_node_kobj(n) (&(n)->kobj)
+#else
+#define of_node_kobj(n) NULL
+#endif
#ifdef CONFIG_OF_DYNAMIC
extern struct device_node *of_node_get(struct device_node *node);
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..d7405ea
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+/*
+ * Default key for inline encryption.
+ *
+ * For now only AES-256-XTS is supported, so this is a fixed length. But if
+ * ever needed, this should be made variable-length with a 'mode' and 'size'.
+ * (Remember to update pfk_allow_merge_bio() when doing so!)
+ */
+#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64
+
+struct blk_encryption_key {
+ u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
+};
+
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+ return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2)
+{
+ return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 2fb9f03..78c2a9f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -282,6 +282,10 @@
POWER_SUPPLY_PROP_ALLOW_HVDCP3,
POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED,
+ POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+ POWER_SUPPLY_PROP_BATTERY_INFO,
+ POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+ POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 0000000..600aadf
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
+#ifndef _LINUX_REFCOUNT_H
+#define _LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_REFCOUNT
+#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
+#define __refcount_check __must_check
+#else
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#endif
+
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+ atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+ return atomic_read(&r->refs);
+}
+
+static inline __refcount_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (!val)
+ return false;
+
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ new = val + i;
+ if (new < val)
+ new = UINT_MAX;
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+ REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ new = val + 1;
+
+ if (!val)
+ return false;
+
+ if (unlikely(!new))
+ return true;
+
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+ REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return false;
+
+ new = val - i;
+ if (new > val) {
+ REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return false;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_sub_and_test(1, r);
+}
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+static inline
+void refcount_dec(refcount_t *r)
+{
+ REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * and not cmpxchg in generic, because that would allow implementing unsafe
+ * operations.
+ */
+static inline __refcount_check
+bool refcount_dec_if_one(refcount_t *r)
+{
+ return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+static inline __refcount_check
+bool refcount_dec_not_one(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ if (val == 1)
+ return false;
+
+ new = val - 1;
+ if (new > val) {
+ REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return true;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return true;
+}
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ mutex_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ mutex_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+
+#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 3632428..bfb1b74 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -256,6 +257,8 @@
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
+
int security_task_create(unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c40355f..d4f1759 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -44,6 +44,33 @@
};
/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_IN_PROGRESS Charger detection in progress
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ * USB_CHG_STATE_QUEUE_SM_WORK SM work to start/stop gadget is queued.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_IN_PROGRESS,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+ USB_CHG_STATE_QUEUE_SM_WORK,
+};
+
+/**
* USB charger types
*
* USB_INVALID_CHARGER Invalid USB charger.
@@ -126,7 +153,10 @@
* @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
* @cur_power: The amount of mA available from downstream port.
* @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
* @chg_type: The type of charger attached.
+ * @chg_detection: True if PHY is doing charger type detection.
* @bus_perf_client: Bus performance client handle to request BUS bandwidth
* @host_bus_suspend: indicates host bus suspend or not.
* @device_bus_suspend: indicates device bus suspend or not.
@@ -149,6 +179,7 @@
* @max_nominal_system_clk_rate: max freq at which system clock can run in
nominal mode.
* @sdp_check: SDP detection work in case of USB_FLOAT power supply
+ * @notify_charger_work: Charger notification work.
*/
struct msm_otg {
struct usb_phy phy;
@@ -191,8 +222,11 @@
int async_int;
unsigned int cur_power;
struct workqueue_struct *otg_wq;
+ struct delayed_work chg_work;
struct delayed_work id_status_work;
+ enum usb_chg_state chg_state;
enum usb_chg_type chg_type;
+ bool chg_detection;
unsigned int dcd_time;
unsigned long caps;
uint32_t bus_perf_client;
@@ -278,7 +312,7 @@
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
struct delayed_work sdp_check;
- struct work_struct notify_chg_current_work;
+ struct work_struct notify_charger_work;
};
struct ci13xxx_platform_data {
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e5430d..08b1b7b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -662,6 +662,9 @@
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* Inline encryption support? */
+ unsigned inlinecrypt_support:1;
+
unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
diff --git a/include/soc/qcom/pm-legacy.h b/include/soc/qcom/pm-legacy.h
new file mode 100644
index 0000000..7fdb0cd
--- /dev/null
+++ b/include/soc/qcom/pm-legacy.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, 2018, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+#include <asm/barrier.h>
+#include <dt-bindings/msm/pm.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_RETENTION,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ MSM_PM_SLEEP_MODE_FASTPC,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+ MSM_PM_SLEEP_MODE_NR,
+ MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+ MSM_SCM_L2_ON = 0,
+ MSM_SCM_L2_OFF = 1,
+ MSM_SCM_L2_GDHS = 3,
+ MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr) ((cpu) *MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+ uint32_t latency_us;
+ uint32_t sleep_us;
+ uint32_t next_event_us;
+ uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+ void __iomem *base_addr;
+ uint32_t mask;
+};
+
+struct latency_level {
+ int affinity_level;
+ int reset_level;
+ const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the l2 flush flag enum that is passed down to TZ during power
+ * collapse
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode, bool idle);
+
+struct msm_pm_cpr_ops {
+ void (*cpr_suspend)(void);
+ void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+static inline void msm_arch_idle(void)
+{
+ /* memory barrier */
+ mb();
+ wfi();
+}
+
+#ifdef CONFIG_MSM_PM_LEGACY
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @latency_level: pointer to structure with below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ * latency is required.
+ * LPM_AFF_LVL_CPU : CPU level
+ * LPM_AFF_LVL_L2 : L2 level
+ * LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ * low power mode with control logic power collapse or
+ * "LPM_RESET_LVL_PC" for low power mode with control and
+ * memory logic power collapse or "LPM_RESET_LVL_RET" for
+ * retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ * is required or NULL if the minimum value out of all the
+ * clusters is to be returned. For CPU level, the name of the
+ * L2 cluster to be passed. For CCI it has no effect.
+ * @latency: address to get the latency value.
+ *
+ * latency value will be for the particular cluster or the minimum
+ * value out of all the clusters at the particular affinity_level
+ * and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
+
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; };
+
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+ msm_arch_idle();
+};
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+
+static inline int lpm_get_latency(struct latency_level *level,
+ uint32_t *latency)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE = 0,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+ int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern dma_addr_t msm_pc_debug_counters_phys;
+#endif /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
index d0f76d0..a6f38cd 100644
--- a/include/soc/qcom/spm.h
+++ b/include/soc/qcom/spm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
#if defined(CONFIG_MSM_SPM)
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs);
int msm_spm_probe_done(void);
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
int msm_spm_get_vdd(unsigned int cpu);
@@ -43,6 +44,8 @@
struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
unsigned int mode, bool notify_rpm);
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+ unsigned int mode, bool notify_rpm);
int msm_spm_device_init(void);
bool msm_spm_is_mode_avail(unsigned int mode);
void msm_spm_dump_regs(unsigned int cpu);
@@ -80,6 +83,8 @@
return -ENODEV;
}
+static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {}
+
static inline int msm_spm_probe_done(void)
{
return -ENODEV;
@@ -114,6 +119,13 @@
{
return -ENODEV;
}
+
+static inline int msm_spm_config_low_power_mode_addr(
+ struct msm_spm_device *dev, unsigned int mode, bool notify_rpm)
+{
+ return -ENODEV;
+}
+
static inline struct msm_spm_device *msm_spm_get_device_by_name(
const char *name)
{
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
index 400e959..fca0548 100644
--- a/include/trace/events/pdc.h
+++ b/include/trace/events/pdc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
),
TP_printk("%s hwirq:%u pin:%u type:%u enable:%u",
- __entry->func, __entry->pin, __entry->hwirq, __entry->type,
+ __entry->func, __entry->hwirq, __entry->pin, __entry->type,
__entry->enable)
);
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index c25da0e..54c1272 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -250,6 +250,25 @@
__entry->sample, __entry->tmr)
);
+TRACE_EVENT(pre_pc_cb,
+
+ TP_PROTO(int tzflag),
+
+ TP_ARGS(tzflag),
+
+ TP_STRUCT__entry(
+ __field(int, tzflag)
+ ),
+
+ TP_fast_assign(
+ __entry->tzflag = tzflag;
+ ),
+
+ TP_printk("tzflag:%d",
+ __entry->tzflag
+ )
+);
+
#endif
#define TRACE_INCLUDE_FILE trace_msm_low_power
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index c9248e5a..5da3634 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -472,6 +472,8 @@
header-y += v4l2-dv-timings.h
header-y += v4l2-mediabus.h
header-y += v4l2-subdev.h
+header-y += msm_vidc_dec.h
+header-y += msm_vidc_enc.h
header-y += veth.h
header-y += vfio.h
header-y += vhost.h
diff --git a/include/uapi/linux/bgcom_interface.h b/include/uapi/linux/bgcom_interface.h
index f18280a..1ee7b87 100644
--- a/include/uapi/linux/bgcom_interface.h
+++ b/include/uapi/linux/bgcom_interface.h
@@ -19,6 +19,7 @@
#define BGCOM_REG_WRITE 5
#define BGCOM_SOFT_RESET 6
#define BGCOM_MODEM_DOWN2_BG 7
+#define BGCOM_TWM_EXIT 8
#define EXCHANGE_CODE 'V'
struct bg_ui_data {
@@ -57,6 +58,9 @@
#define BG_SOFT_RESET \
_IOWR(EXCHANGE_CODE, BGCOM_SOFT_RESET, \
struct bg_ui_data)
+#define BG_TWM_EXIT \
+ _IOWR(EXCHANGE_CODE, BGCOM_TWM_EXIT, \
+ struct bg_ui_data)
#define BG_MODEM_DOWN2_BG_DONE \
_IOWR(EXCHANGE_CODE, BGCOM_MODEM_DOWN2_BG, \
struct bg_ui_data)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 12263e4..dfcf371 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -273,6 +273,7 @@
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
#define FS_ENCRYPTION_MODE_AES_128_CBC 5
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
+#define FS_ENCRYPTION_MODE_PRIVATE 127
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/msm_vidc_dec.h b/include/uapi/linux/msm_vidc_dec.h
new file mode 100644
index 0000000..46af82b
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_dec.h
@@ -0,0 +1,629 @@
+#ifndef _UAPI_MSM_VIDC_DEC_H_
+#define _UAPI_MSM_VIDC_DEC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* STATUS CODES */
+/* Base value for status codes */
+#define VDEC_S_BASE 0x40000000
+/* Success */
+#define VDEC_S_SUCCESS (VDEC_S_BASE)
+/* General failure */
+#define VDEC_S_EFAIL (VDEC_S_BASE + 1)
+/* Fatal irrecoverable failure. Need to tear down session. */
+#define VDEC_S_EFATAL (VDEC_S_BASE + 2)
+/* Error detected in the passed parameters */
+#define VDEC_S_EBADPARAM (VDEC_S_BASE + 3)
+/* Command called in invalid state. */
+#define VDEC_S_EINVALSTATE (VDEC_S_BASE + 4)
+ /* Insufficient OS resources - thread, memory etc. */
+#define VDEC_S_ENOSWRES (VDEC_S_BASE + 5)
+ /* Insufficient HW resources - core capacity maxed out. */
+#define VDEC_S_ENOHWRES (VDEC_S_BASE + 6)
+/* Invalid command called */
+#define VDEC_S_EINVALCMD (VDEC_S_BASE + 7)
+/* Command timeout. */
+#define VDEC_S_ETIMEOUT (VDEC_S_BASE + 8)
+/* Pre-requirement is not met for API. */
+#define VDEC_S_ENOPREREQ (VDEC_S_BASE + 9)
+/* Command queue is full. */
+#define VDEC_S_ECMDQFULL (VDEC_S_BASE + 10)
+/* Command is not supported by this driver */
+#define VDEC_S_ENOTSUPP (VDEC_S_BASE + 11)
+/* Command is not implemented by the driver. */
+#define VDEC_S_ENOTIMPL (VDEC_S_BASE + 12)
+/* Driver is busy with the current command. */
+#define VDEC_S_BUSY (VDEC_S_BASE + 13)
+#define VDEC_S_INPUT_BITSTREAM_ERR (VDEC_S_BASE + 14)
+
+#define VDEC_INTF_VER 1
+#define VDEC_MSG_BASE 0x0000000
+/*
+ *Codes to identify asynchronous message responses and events that driver
+ *wants to communicate to the app.
+ */
+#define VDEC_MSG_INVALID (VDEC_MSG_BASE + 0)
+#define VDEC_MSG_RESP_INPUT_BUFFER_DONE (VDEC_MSG_BASE + 1)
+#define VDEC_MSG_RESP_OUTPUT_BUFFER_DONE (VDEC_MSG_BASE + 2)
+#define VDEC_MSG_RESP_INPUT_FLUSHED (VDEC_MSG_BASE + 3)
+#define VDEC_MSG_RESP_OUTPUT_FLUSHED (VDEC_MSG_BASE + 4)
+#define VDEC_MSG_RESP_FLUSH_INPUT_DONE (VDEC_MSG_BASE + 5)
+#define VDEC_MSG_RESP_FLUSH_OUTPUT_DONE (VDEC_MSG_BASE + 6)
+#define VDEC_MSG_RESP_START_DONE (VDEC_MSG_BASE + 7)
+#define VDEC_MSG_RESP_STOP_DONE (VDEC_MSG_BASE + 8)
+#define VDEC_MSG_RESP_PAUSE_DONE (VDEC_MSG_BASE + 9)
+#define VDEC_MSG_RESP_RESUME_DONE (VDEC_MSG_BASE + 10)
+#define VDEC_MSG_RESP_RESOURCE_LOADED (VDEC_MSG_BASE + 11)
+#define VDEC_EVT_RESOURCES_LOST (VDEC_MSG_BASE + 12)
+#define VDEC_MSG_EVT_CONFIG_CHANGED (VDEC_MSG_BASE + 13)
+#define VDEC_MSG_EVT_HW_ERROR (VDEC_MSG_BASE + 14)
+#define VDEC_MSG_EVT_INFO_CONFIG_CHANGED (VDEC_MSG_BASE + 15)
+#define VDEC_MSG_EVT_INFO_FIELD_DROPPED (VDEC_MSG_BASE + 16)
+#define VDEC_MSG_EVT_HW_OVERLOAD (VDEC_MSG_BASE + 17)
+#define VDEC_MSG_EVT_MAX_CLIENTS (VDEC_MSG_BASE + 18)
+#define VDEC_MSG_EVT_HW_UNSUPPORTED (VDEC_MSG_BASE + 19)
+
+/*Buffer flags bits masks.*/
+#define VDEC_BUFFERFLAG_EOS 0x00000001
+#define VDEC_BUFFERFLAG_DECODEONLY 0x00000004
+#define VDEC_BUFFERFLAG_DATACORRUPT 0x00000008
+#define VDEC_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define VDEC_BUFFERFLAG_SYNCFRAME 0x00000020
+#define VDEC_BUFFERFLAG_EXTRADATA 0x00000040
+#define VDEC_BUFFERFLAG_CODECCONFIG 0x00000080
+
+/*Post processing flags bit masks*/
+#define VDEC_EXTRADATA_NONE 0x001
+#define VDEC_EXTRADATA_QP 0x004
+#define VDEC_EXTRADATA_MB_ERROR_MAP 0x008
+#define VDEC_EXTRADATA_SEI 0x010
+#define VDEC_EXTRADATA_VUI 0x020
+#define VDEC_EXTRADATA_VC1 0x040
+
+#define VDEC_EXTRADATA_EXT_DATA 0x0800
+#define VDEC_EXTRADATA_USER_DATA 0x1000
+#define VDEC_EXTRADATA_EXT_BUFFER 0x2000
+
+#define VDEC_CMDBASE 0x800
+#define VDEC_CMD_SET_INTF_VERSION (VDEC_CMDBASE)
+
+#define VDEC_IOCTL_MAGIC 'v'
+
+struct vdec_ioctl_msg {
+ void __user *in;
+ void __user *out;
+};
+
+/*
+ * CMD params: InputParam:enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED \
+ _IOWR(VDEC_IOCTL_MAGIC, 0, struct vdec_ioctl_msg)
+
+/*
+ * CMD params:InputParam: NULL
+ * OutputParam: uint32_t(bitmask)
+ */
+#define VDEC_IOCTL_GET_INTERLACE_FORMAT \
+ _IOR(VDEC_IOCTL_MAGIC, 1, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: InputParam: enum vdec_codec
+ * OutputParam: struct vdec_profile_level
+ */
+#define VDEC_IOCTL_GET_CURRENT_PROFILE_LEVEL \
+ _IOWR(VDEC_IOCTL_MAGIC, 2, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_output_fromat OutputParam: NULL
+ * GET: InputParam: NULL OutputParam: enum vdec_output_fromat
+ */
+#define VDEC_IOCTL_SET_OUTPUT_FORMAT \
+ _IOWR(VDEC_IOCTL_MAGIC, 3, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_OUTPUT_FORMAT \
+ _IOWR(VDEC_IOCTL_MAGIC, 4, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: enum vdec_codec OutputParam: NULL
+ * GET: InputParam: NULL OutputParam: enum vdec_codec
+ */
+#define VDEC_IOCTL_SET_CODEC \
+ _IOW(VDEC_IOCTL_MAGIC, 5, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_CODEC \
+ _IOR(VDEC_IOCTL_MAGIC, 6, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam: struct vdec_picsize outputparam: NULL
+ * GET: InputParam: NULL outputparam: struct vdec_picsize
+ */
+#define VDEC_IOCTL_SET_PICRES \
+ _IOW(VDEC_IOCTL_MAGIC, 7, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_PICRES \
+ _IOR(VDEC_IOCTL_MAGIC, 8, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_EXTRADATA \
+ _IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_EXTRADATA \
+ _IOR(VDEC_IOCTL_MAGIC, 10, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_SEQUENCE_HEADER \
+ _IOW(VDEC_IOCTL_MAGIC, 11, struct vdec_ioctl_msg)
+
+/*
+ * CMD params: SET: InputParam - vdec_allocatorproperty, OutputParam - NULL
+ * GET: InputParam - NULL, OutputParam - vdec_allocatorproperty
+ */
+#define VDEC_IOCTL_SET_BUFFER_REQ \
+ _IOW(VDEC_IOCTL_MAGIC, 12, struct vdec_ioctl_msg)
+#define VDEC_IOCTL_GET_BUFFER_REQ \
+ _IOR(VDEC_IOCTL_MAGIC, 13, struct vdec_ioctl_msg)
+/* CMD params: InputParam - vdec_buffer, OutputParam - uint8_t** */
+#define VDEC_IOCTL_ALLOCATE_BUFFER \
+ _IOWR(VDEC_IOCTL_MAGIC, 14, struct vdec_ioctl_msg)
+/* CMD params: InputParam - uint8_t *, OutputParam - NULL.*/
+#define VDEC_IOCTL_FREE_BUFFER \
+ _IOW(VDEC_IOCTL_MAGIC, 15, struct vdec_ioctl_msg)
+
+/*CMD params: CMD: InputParam - struct vdec_setbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_SET_BUFFER \
+ _IOW(VDEC_IOCTL_MAGIC, 16, struct vdec_ioctl_msg)
+
+/* CMD params: InputParam - struct vdec_fillbuffer_cmd, OutputParam - NULL*/
+#define VDEC_IOCTL_FILL_OUTPUT_BUFFER \
+ _IOW(VDEC_IOCTL_MAGIC, 17, struct vdec_ioctl_msg)
+
+/*CMD params: InputParam - struct vdec_frameinfo , OutputParam - NULL*/
+#define VDEC_IOCTL_DECODE_FRAME \
+ _IOW(VDEC_IOCTL_MAGIC, 18, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_LOAD_RESOURCES _IO(VDEC_IOCTL_MAGIC, 19)
+#define VDEC_IOCTL_CMD_START _IO(VDEC_IOCTL_MAGIC, 20)
+#define VDEC_IOCTL_CMD_STOP _IO(VDEC_IOCTL_MAGIC, 21)
+#define VDEC_IOCTL_CMD_PAUSE _IO(VDEC_IOCTL_MAGIC, 22)
+#define VDEC_IOCTL_CMD_RESUME _IO(VDEC_IOCTL_MAGIC, 23)
+
+/*CMD params: InputParam - enum vdec_bufferflush , OutputParam - NULL */
+#define VDEC_IOCTL_CMD_FLUSH _IOW(VDEC_IOCTL_MAGIC, 24, struct vdec_ioctl_msg)
+
+/* ========================================================
+ * IOCTL for getting asynchronous notification from driver
+ * ========================================================
+ */
+
+/*IOCTL params: InputParam - NULL, OutputParam - struct vdec_msginfo*/
+#define VDEC_IOCTL_GET_NEXT_MSG \
+ _IOR(VDEC_IOCTL_MAGIC, 25, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_STOP_NEXT_MSG _IO(VDEC_IOCTL_MAGIC, 26)
+
+#define VDEC_IOCTL_GET_NUMBER_INSTANCES \
+ _IOR(VDEC_IOCTL_MAGIC, 27, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PICTURE_ORDER \
+ _IOW(VDEC_IOCTL_MAGIC, 28, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_FRAME_RATE \
+ _IOW(VDEC_IOCTL_MAGIC, 29, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_H264_MV_BUFFER \
+ _IOW(VDEC_IOCTL_MAGIC, 30, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_H264_MV_BUFFER \
+ _IOW(VDEC_IOCTL_MAGIC, 31, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_MV_BUFFER_SIZE \
+ _IOR(VDEC_IOCTL_MAGIC, 32, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_IDR_ONLY_DECODING \
+ _IO(VDEC_IOCTL_MAGIC, 33)
+
+#define VDEC_IOCTL_SET_CONT_ON_RECONFIG \
+ _IO(VDEC_IOCTL_MAGIC, 34)
+
+#define VDEC_IOCTL_SET_DISABLE_DMX \
+ _IOW(VDEC_IOCTL_MAGIC, 35, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX \
+ _IOR(VDEC_IOCTL_MAGIC, 36, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT \
+ _IOR(VDEC_IOCTL_MAGIC, 37, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_PERF_CLK \
+ _IOR(VDEC_IOCTL_MAGIC, 38, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_SET_META_BUFFERS \
+ _IOW(VDEC_IOCTL_MAGIC, 39, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_META_BUFFERS \
+ _IO(VDEC_IOCTL_MAGIC, 40)
+
+enum vdec_picture {
+ PICTURE_TYPE_I,
+ PICTURE_TYPE_P,
+ PICTURE_TYPE_B,
+ PICTURE_TYPE_BI,
+ PICTURE_TYPE_SKIP,
+ PICTURE_TYPE_IDR,
+ PICTURE_TYPE_UNKNOWN
+};
+
+enum vdec_buffer {
+ VDEC_BUFFER_TYPE_INPUT,
+ VDEC_BUFFER_TYPE_OUTPUT
+};
+
+struct vdec_allocatorproperty {
+ enum vdec_buffer buffer_type;
+ uint32_t mincount;
+ uint32_t maxcount;
+ uint32_t actualcount;
+ size_t buffer_size;
+ uint32_t alignment;
+ uint32_t buf_poolid;
+ size_t meta_buffer_size;
+};
+
+struct vdec_bufferpayload {
+ void __user *bufferaddr;
+ size_t buffer_len;
+ int pmem_fd;
+ size_t offset;
+ size_t mmaped_size;
+};
+
+struct vdec_setbuffer_cmd {
+ enum vdec_buffer buffer_type;
+ struct vdec_bufferpayload buffer;
+};
+
+struct vdec_fillbuffer_cmd {
+ struct vdec_bufferpayload buffer;
+ void *client_data;
+};
+
+enum vdec_bufferflush {
+ VDEC_FLUSH_TYPE_INPUT,
+ VDEC_FLUSH_TYPE_OUTPUT,
+ VDEC_FLUSH_TYPE_ALL
+};
+
+enum vdec_codec {
+ VDEC_CODECTYPE_H264 = 0x1,
+ VDEC_CODECTYPE_H263 = 0x2,
+ VDEC_CODECTYPE_MPEG4 = 0x3,
+ VDEC_CODECTYPE_DIVX_3 = 0x4,
+ VDEC_CODECTYPE_DIVX_4 = 0x5,
+ VDEC_CODECTYPE_DIVX_5 = 0x6,
+ VDEC_CODECTYPE_DIVX_6 = 0x7,
+ VDEC_CODECTYPE_XVID = 0x8,
+ VDEC_CODECTYPE_MPEG1 = 0x9,
+ VDEC_CODECTYPE_MPEG2 = 0xa,
+ VDEC_CODECTYPE_VC1 = 0xb,
+ VDEC_CODECTYPE_VC1_RCV = 0xc,
+ VDEC_CODECTYPE_HEVC = 0xd,
+ VDEC_CODECTYPE_MVC = 0xe,
+ VDEC_CODECTYPE_VP8 = 0xf,
+ VDEC_CODECTYPE_VP9 = 0x10,
+};
+
+enum vdec_mpeg2_profile {
+ VDEC_MPEG2ProfileSimple = 0x1,
+ VDEC_MPEG2ProfileMain = 0x2,
+ VDEC_MPEG2Profile422 = 0x4,
+ VDEC_MPEG2ProfileSNR = 0x8,
+ VDEC_MPEG2ProfileSpatial = 0x10,
+ VDEC_MPEG2ProfileHigh = 0x20,
+ VDEC_MPEG2ProfileKhronosExtensions = 0x6F000000,
+ VDEC_MPEG2ProfileVendorStartUnused = 0x7F000000,
+ VDEC_MPEG2ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg2_level {
+
+ VDEC_MPEG2LevelLL = 0x1,
+ VDEC_MPEG2LevelML = 0x2,
+ VDEC_MPEG2LevelH14 = 0x4,
+ VDEC_MPEG2LevelHL = 0x8,
+ VDEC_MPEG2LevelKhronosExtensions = 0x6F000000,
+ VDEC_MPEG2LevelVendorStartUnused = 0x7F000000,
+ VDEC_MPEG2LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_profile {
+ VDEC_MPEG4ProfileSimple = 0x01,
+ VDEC_MPEG4ProfileSimpleScalable = 0x02,
+ VDEC_MPEG4ProfileCore = 0x04,
+ VDEC_MPEG4ProfileMain = 0x08,
+ VDEC_MPEG4ProfileNbit = 0x10,
+ VDEC_MPEG4ProfileScalableTexture = 0x20,
+ VDEC_MPEG4ProfileSimpleFace = 0x40,
+ VDEC_MPEG4ProfileSimpleFBA = 0x80,
+ VDEC_MPEG4ProfileBasicAnimated = 0x100,
+ VDEC_MPEG4ProfileHybrid = 0x200,
+ VDEC_MPEG4ProfileAdvancedRealTime = 0x400,
+ VDEC_MPEG4ProfileCoreScalable = 0x800,
+ VDEC_MPEG4ProfileAdvancedCoding = 0x1000,
+ VDEC_MPEG4ProfileAdvancedCore = 0x2000,
+ VDEC_MPEG4ProfileAdvancedScalable = 0x4000,
+ VDEC_MPEG4ProfileAdvancedSimple = 0x8000,
+ VDEC_MPEG4ProfileKhronosExtensions = 0x6F000000,
+ VDEC_MPEG4ProfileVendorStartUnused = 0x7F000000,
+ VDEC_MPEG4ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_mpeg4_level {
+ VDEC_MPEG4Level0 = 0x01,
+ VDEC_MPEG4Level0b = 0x02,
+ VDEC_MPEG4Level1 = 0x04,
+ VDEC_MPEG4Level2 = 0x08,
+ VDEC_MPEG4Level3 = 0x10,
+ VDEC_MPEG4Level4 = 0x20,
+ VDEC_MPEG4Level4a = 0x40,
+ VDEC_MPEG4Level5 = 0x80,
+ VDEC_MPEG4LevelKhronosExtensions = 0x6F000000,
+ VDEC_MPEG4LevelVendorStartUnused = 0x7F000000,
+ VDEC_MPEG4LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_profile {
+ VDEC_AVCProfileBaseline = 0x01,
+ VDEC_AVCProfileMain = 0x02,
+ VDEC_AVCProfileExtended = 0x04,
+ VDEC_AVCProfileHigh = 0x08,
+ VDEC_AVCProfileHigh10 = 0x10,
+ VDEC_AVCProfileHigh422 = 0x20,
+ VDEC_AVCProfileHigh444 = 0x40,
+ VDEC_AVCProfileKhronosExtensions = 0x6F000000,
+ VDEC_AVCProfileVendorStartUnused = 0x7F000000,
+ VDEC_AVCProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_avc_level {
+ VDEC_AVCLevel1 = 0x01,
+ VDEC_AVCLevel1b = 0x02,
+ VDEC_AVCLevel11 = 0x04,
+ VDEC_AVCLevel12 = 0x08,
+ VDEC_AVCLevel13 = 0x10,
+ VDEC_AVCLevel2 = 0x20,
+ VDEC_AVCLevel21 = 0x40,
+ VDEC_AVCLevel22 = 0x80,
+ VDEC_AVCLevel3 = 0x100,
+ VDEC_AVCLevel31 = 0x200,
+ VDEC_AVCLevel32 = 0x400,
+ VDEC_AVCLevel4 = 0x800,
+ VDEC_AVCLevel41 = 0x1000,
+ VDEC_AVCLevel42 = 0x2000,
+ VDEC_AVCLevel5 = 0x4000,
+ VDEC_AVCLevel51 = 0x8000,
+ VDEC_AVCLevelKhronosExtensions = 0x6F000000,
+ VDEC_AVCLevelVendorStartUnused = 0x7F000000,
+ VDEC_AVCLevelMax = 0x7FFFFFFF
+};
+
+enum vdec_divx_profile {
+ VDEC_DIVXProfile_qMobile = 0x01,
+ VDEC_DIVXProfile_Mobile = 0x02,
+ VDEC_DIVXProfile_HD = 0x04,
+ VDEC_DIVXProfile_Handheld = 0x08,
+ VDEC_DIVXProfile_Portable = 0x10,
+ VDEC_DIVXProfile_HomeTheater = 0x20
+};
+
+enum vdec_xvid_profile {
+ VDEC_XVIDProfile_Simple = 0x1,
+ VDEC_XVIDProfile_Advanced_Realtime_Simple = 0x2,
+ VDEC_XVIDProfile_Advanced_Simple = 0x4
+};
+
+enum vdec_xvid_level {
+ VDEC_XVID_LEVEL_S_L0 = 0x1,
+ VDEC_XVID_LEVEL_S_L1 = 0x2,
+ VDEC_XVID_LEVEL_S_L2 = 0x4,
+ VDEC_XVID_LEVEL_S_L3 = 0x8,
+ VDEC_XVID_LEVEL_ARTS_L1 = 0x10,
+ VDEC_XVID_LEVEL_ARTS_L2 = 0x20,
+ VDEC_XVID_LEVEL_ARTS_L3 = 0x40,
+ VDEC_XVID_LEVEL_ARTS_L4 = 0x80,
+ VDEC_XVID_LEVEL_AS_L0 = 0x100,
+ VDEC_XVID_LEVEL_AS_L1 = 0x200,
+ VDEC_XVID_LEVEL_AS_L2 = 0x400,
+ VDEC_XVID_LEVEL_AS_L3 = 0x800,
+ VDEC_XVID_LEVEL_AS_L4 = 0x1000
+};
+
+enum vdec_h263profile {
+ VDEC_H263ProfileBaseline = 0x01,
+ VDEC_H263ProfileH320Coding = 0x02,
+ VDEC_H263ProfileBackwardCompatible = 0x04,
+ VDEC_H263ProfileISWV2 = 0x08,
+ VDEC_H263ProfileISWV3 = 0x10,
+ VDEC_H263ProfileHighCompression = 0x20,
+ VDEC_H263ProfileInternet = 0x40,
+ VDEC_H263ProfileInterlace = 0x80,
+ VDEC_H263ProfileHighLatency = 0x100,
+ VDEC_H263ProfileKhronosExtensions = 0x6F000000,
+ VDEC_H263ProfileVendorStartUnused = 0x7F000000,
+ VDEC_H263ProfileMax = 0x7FFFFFFF
+};
+
+enum vdec_h263level {
+ VDEC_H263Level10 = 0x01,
+ VDEC_H263Level20 = 0x02,
+ VDEC_H263Level30 = 0x04,
+ VDEC_H263Level40 = 0x08,
+ VDEC_H263Level45 = 0x10,
+ VDEC_H263Level50 = 0x20,
+ VDEC_H263Level60 = 0x40,
+ VDEC_H263Level70 = 0x80,
+ VDEC_H263LevelKhronosExtensions = 0x6F000000,
+ VDEC_H263LevelVendorStartUnused = 0x7F000000,
+ VDEC_H263LevelMax = 0x7FFFFFFF
+};
+
+enum vdec_wmv_format {
+ VDEC_WMVFormatUnused = 0x01,
+ VDEC_WMVFormat7 = 0x02,
+ VDEC_WMVFormat8 = 0x04,
+ VDEC_WMVFormat9 = 0x08,
+ VDEC_WMFFormatKhronosExtensions = 0x6F000000,
+ VDEC_WMFFormatVendorStartUnused = 0x7F000000,
+ VDEC_WMVFormatMax = 0x7FFFFFFF
+};
+
+enum vdec_vc1_profile {
+ VDEC_VC1ProfileSimple = 0x1,
+ VDEC_VC1ProfileMain = 0x2,
+ VDEC_VC1ProfileAdvanced = 0x4
+};
+
+enum vdec_vc1_level {
+ VDEC_VC1_LEVEL_S_Low = 0x1,
+ VDEC_VC1_LEVEL_S_Medium = 0x2,
+ VDEC_VC1_LEVEL_M_Low = 0x4,
+ VDEC_VC1_LEVEL_M_Medium = 0x8,
+ VDEC_VC1_LEVEL_M_High = 0x10,
+ VDEC_VC1_LEVEL_A_L0 = 0x20,
+ VDEC_VC1_LEVEL_A_L1 = 0x40,
+ VDEC_VC1_LEVEL_A_L2 = 0x80,
+ VDEC_VC1_LEVEL_A_L3 = 0x100,
+ VDEC_VC1_LEVEL_A_L4 = 0x200
+};
+
+struct vdec_profile_level {
+ uint32_t profiles;
+ uint32_t levels;
+};
+
+enum vdec_interlaced_format {
+ VDEC_InterlaceFrameProgressive = 0x1,
+ VDEC_InterlaceInterleaveFrameTopFieldFirst = 0x2,
+ VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4
+};
+
+#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \
+ VDEC_YUV_FORMAT_NV12_TP10_UBWC
+
+enum vdec_output_fromat {
+ VDEC_YUV_FORMAT_NV12 = 0x1,
+ VDEC_YUV_FORMAT_TILE_4x2 = 0x2,
+ VDEC_YUV_FORMAT_NV12_UBWC = 0x3,
+ VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4
+};
+
+enum vdec_output_order {
+ VDEC_ORDER_DISPLAY = 0x1,
+ VDEC_ORDER_DECODE = 0x2
+};
+
+struct vdec_picsize {
+ uint32_t frame_width;
+ uint32_t frame_height;
+ uint32_t stride;
+ uint32_t scan_lines;
+};
+
+struct vdec_seqheader {
+ void __user *ptr_seqheader;
+ size_t seq_header_len;
+ int pmem_fd;
+ size_t pmem_offset;
+};
+
+struct vdec_mberror {
+ void __user *ptr_errormap;
+ size_t err_mapsize;
+};
+
+struct vdec_input_frameinfo {
+ void __user *bufferaddr;
+ size_t offset;
+ size_t datalen;
+ uint32_t flags;
+ int64_t timestamp;
+ void *client_data;
+ int pmem_fd;
+ size_t pmem_offset;
+ void __user *desc_addr;
+ uint32_t desc_size;
+};
+
+struct vdec_framesize {
+ uint32_t left;
+ uint32_t top;
+ uint32_t right;
+ uint32_t bottom;
+};
+
+struct vdec_aspectratioinfo {
+ uint32_t aspect_ratio;
+ uint32_t par_width;
+ uint32_t par_height;
+};
+
+struct vdec_sep_metadatainfo {
+ void __user *metabufaddr;
+ uint32_t size;
+ int fd;
+ int offset;
+ uint32_t buffer_size;
+};
+
+struct vdec_output_frameinfo {
+ void __user *bufferaddr;
+ size_t offset;
+ size_t len;
+ uint32_t flags;
+ int64_t time_stamp;
+ enum vdec_picture pic_type;
+ void *client_data;
+ void *input_frame_clientdata;
+ struct vdec_picsize picsize;
+ struct vdec_framesize framesize;
+ enum vdec_interlaced_format interlaced_format;
+ struct vdec_aspectratioinfo aspect_ratio_info;
+ struct vdec_sep_metadatainfo metadata_info;
+};
+
+union vdec_msgdata {
+ struct vdec_output_frameinfo output_frame;
+ void *input_frame_clientdata;
+};
+
+struct vdec_msginfo {
+ uint32_t status_code;
+ uint32_t msgcode;
+ union vdec_msgdata msgdata;
+ size_t msgdatasize;
+};
+
+struct vdec_framerate {
+ unsigned long fps_denominator;
+ unsigned long fps_numerator;
+};
+
+struct vdec_h264_mv {
+ size_t size;
+ int count;
+ int pmem_fd;
+ int offset;
+};
+
+struct vdec_mv_buff_size {
+ int width;
+ int height;
+ int size;
+ int alignment;
+};
+
+struct vdec_meta_buffers {
+ size_t size;
+ int count;
+ int pmem_fd;
+ int pmem_fd_iommu;
+ int offset;
+};
+
+#endif /* _UAPI_MSM_VIDC_DEC_H_ */
diff --git a/include/uapi/linux/msm_vidc_enc.h b/include/uapi/linux/msm_vidc_enc.h
new file mode 100644
index 0000000..f4f1630
--- /dev/null
+++ b/include/uapi/linux/msm_vidc_enc.h
@@ -0,0 +1,752 @@
+#ifndef _UAPI_MSM_VIDC_ENC_H_
+#define _UAPI_MSM_VIDC_ENC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/** STATUS CODES*/
+/* Base value for status codes */
+#define VEN_S_BASE 0x00000000
+#define VEN_S_SUCCESS (VEN_S_BASE)/* Success */
+#define VEN_S_EFAIL (VEN_S_BASE+1)/* General failure */
+#define VEN_S_EFATAL (VEN_S_BASE+2)/* Fatal irrecoverable failure*/
+#define VEN_S_EBADPARAM (VEN_S_BASE+3)/* Error passed parameters*/
+/*Command called in invalid state*/
+#define VEN_S_EINVALSTATE (VEN_S_BASE+4)
+#define VEN_S_ENOSWRES (VEN_S_BASE+5)/* Insufficient OS resources*/
+#define VEN_S_ENOHWRES (VEN_S_BASE+6)/*Insufficient HW resources */
+#define VEN_S_EBUFFREQ (VEN_S_BASE+7)/* Buffer requirements were not met*/
+#define VEN_S_EINVALCMD (VEN_S_BASE+8)/* Invalid command called */
+#define VEN_S_ETIMEOUT (VEN_S_BASE+9)/* Command timeout. */
+/*Re-attempt was made when multiple invocation not supported for API.*/
+#define VEN_S_ENOREATMPT (VEN_S_BASE+10)
+#define VEN_S_ENOPREREQ (VEN_S_BASE+11)/*Pre-requirement is not met for API*/
+#define VEN_S_ECMDQFULL (VEN_S_BASE+12)/*Command queue is full*/
+#define VEN_S_ENOTSUPP (VEN_S_BASE+13)/*Command not supported*/
+#define VEN_S_ENOTIMPL (VEN_S_BASE+14)/*Command not implemented.*/
+#define VEN_S_ENOTPMEM (VEN_S_BASE+15)/*Buffer is not from PMEM*/
+#define VEN_S_EFLUSHED (VEN_S_BASE+16)/*returned buffer was flushed*/
+#define VEN_S_EINSUFBUF (VEN_S_BASE+17)/*provided buffer size insufficient*/
+#define VEN_S_ESAMESTATE (VEN_S_BASE+18)
+#define VEN_S_EINVALTRANS (VEN_S_BASE+19)
+
+#define VEN_INTF_VER 1
+
+/*Asynchronous messages from driver*/
+#define VEN_MSG_INDICATION 0
+#define VEN_MSG_INPUT_BUFFER_DONE 1
+#define VEN_MSG_OUTPUT_BUFFER_DONE 2
+#define VEN_MSG_NEED_OUTPUT_BUFFER 3
+#define VEN_MSG_FLUSH_INPUT_DONE 4
+#define VEN_MSG_FLUSH_OUTPUT_DONE 5
+#define VEN_MSG_START 6
+#define VEN_MSG_STOP 7
+#define VEN_MSG_PAUSE 8
+#define VEN_MSG_RESUME 9
+#define VEN_MSG_STOP_READING_MSG 10
+#define VEN_MSG_LTRUSE_FAILED 11
+#define VEN_MSG_HW_OVERLOAD 12
+#define VEN_MSG_MAX_CLIENTS 13
+
+
+/*Buffer flags bits masks*/
+#define VEN_BUFFLAG_EOS 0x00000001
+#define VEN_BUFFLAG_ENDOFFRAME 0x00000010
+#define VEN_BUFFLAG_SYNCFRAME 0x00000020
+#define VEN_BUFFLAG_EXTRADATA 0x00000040
+#define VEN_BUFFLAG_CODECCONFIG 0x00000080
+
+/*Post processing flags bit masks*/
+#define VEN_EXTRADATA_NONE 0x001
+#define VEN_EXTRADATA_QCOMFILLER 0x002
+#define VEN_EXTRADATA_SLICEINFO 0x100
+#define VEN_EXTRADATA_LTRINFO 0x200
+#define VEN_EXTRADATA_MBINFO 0x400
+
+/*ENCODER CONFIGURATION CONSTANTS*/
+
+/*Encoded video frame types*/
+#define VEN_FRAME_TYPE_I 1/* I frame type */
+#define VEN_FRAME_TYPE_P 2/* P frame type */
+#define VEN_FRAME_TYPE_B 3/* B frame type */
+
+/*Video codec types*/
+#define VEN_CODEC_MPEG4 1/* MPEG4 Codec */
+#define VEN_CODEC_H264 2/* H.264 Codec */
+#define VEN_CODEC_H263 3/* H.263 Codec */
+
+/*Video codec profile types.*/
+#define VEN_PROFILE_MPEG4_SP 1/* 1 - MPEG4 SP profile */
+#define VEN_PROFILE_MPEG4_ASP 2/* 2 - MPEG4 ASP profile */
+#define VEN_PROFILE_H264_BASELINE 3/* 3 - H264 Baseline profile */
+#define VEN_PROFILE_H264_MAIN 4/* 4 - H264 Main profile */
+#define VEN_PROFILE_H264_HIGH 5/* 5 - H264 High profile */
+#define VEN_PROFILE_H263_BASELINE 6/* 6 - H263 Baseline profile */
+
+/*Video codec profile level types.*/
+#define VEN_LEVEL_MPEG4_0 0x1/* MPEG4 Level 0 */
+#define VEN_LEVEL_MPEG4_1 0x2/* MPEG4 Level 1 */
+#define VEN_LEVEL_MPEG4_2 0x3/* MPEG4 Level 2 */
+#define VEN_LEVEL_MPEG4_3 0x4/* MPEG4 Level 3 */
+#define VEN_LEVEL_MPEG4_4 0x5/* MPEG4 Level 4 */
+#define VEN_LEVEL_MPEG4_5 0x6/* MPEG4 Level 5 */
+#define VEN_LEVEL_MPEG4_3b 0x7/* MPEG4 Level 3b */
+#define VEN_LEVEL_MPEG4_6 0x8/* MPEG4 Level 6 */
+
+#define VEN_LEVEL_H264_1 0x9/* H.264 Level 1 */
+#define VEN_LEVEL_H264_1b 0xA/* H.264 Level 1b */
+#define VEN_LEVEL_H264_1p1 0xB/* H.264 Level 1.1 */
+#define VEN_LEVEL_H264_1p2 0xC/* H.264 Level 1.2 */
+#define VEN_LEVEL_H264_1p3 0xD/* H.264 Level 1.3 */
+#define VEN_LEVEL_H264_2 0xE/* H.264 Level 2 */
+#define VEN_LEVEL_H264_2p1 0xF/* H.264 Level 2.1 */
+#define VEN_LEVEL_H264_2p2 0x10/* H.264 Level 2.2 */
+#define VEN_LEVEL_H264_3 0x11/* H.264 Level 3 */
+#define VEN_LEVEL_H264_3p1 0x12/* H.264 Level 3.1 */
+#define VEN_LEVEL_H264_3p2 0x13/* H.264 Level 3.2 */
+#define VEN_LEVEL_H264_4 0x14/* H.264 Level 4 */
+
+#define VEN_LEVEL_H263_10 0x15/* H.263 Level 10 */
+#define VEN_LEVEL_H263_20 0x16/* H.263 Level 20 */
+#define VEN_LEVEL_H263_30 0x17/* H.263 Level 30 */
+#define VEN_LEVEL_H263_40 0x18/* H.263 Level 40 */
+#define VEN_LEVEL_H263_45 0x19/* H.263 Level 45 */
+#define VEN_LEVEL_H263_50 0x1A/* H.263 Level 50 */
+#define VEN_LEVEL_H263_60 0x1B/* H.263 Level 60 */
+#define VEN_LEVEL_H263_70 0x1C/* H.263 Level 70 */
+
+/*Entropy coding model selection for H.264 encoder.*/
+#define VEN_ENTROPY_MODEL_CAVLC 1
+#define VEN_ENTROPY_MODEL_CABAC 2
+/*Cabac model number (0,1,2) for encoder.*/
+#define VEN_CABAC_MODEL_0 1/* CABAC Model 0. */
+#define VEN_CABAC_MODEL_1 2/* CABAC Model 1. */
+#define VEN_CABAC_MODEL_2 3/* CABAC Model 2. */
+
+/*Deblocking filter control type for encoder.*/
+#define VEN_DB_DISABLE 1/* 1 - Disable deblocking filter*/
+#define VEN_DB_ALL_BLKG_BNDRY 2/* 2 - All blocking boundary filtering*/
+#define VEN_DB_SKIP_SLICE_BNDRY 3/* 3 - Filtering except sliceboundary*/
+
+/*Different methods of Multi slice selection.*/
+#define VEN_MSLICE_OFF 1
+#define VEN_MSLICE_CNT_MB 2 /*number of MBs count per slice*/
+#define VEN_MSLICE_CNT_BYTE 3 /*number of bytes count per slice.*/
+#define VEN_MSLICE_GOB 4 /*Multi slice by GOB for H.263 only.*/
+
+/*Different modes for Rate Control.*/
+#define VEN_RC_OFF 1
+#define VEN_RC_VBR_VFR 2
+#define VEN_RC_VBR_CFR 3
+#define VEN_RC_CBR_VFR 4
+#define VEN_RC_CBR_CFR 5
+
+/*Different modes for flushing buffers*/
+#define VEN_FLUSH_INPUT 1
+#define VEN_FLUSH_OUTPUT 2
+#define VEN_FLUSH_ALL 3
+
+/*Different input formats for YUV data.*/
+#define VEN_INPUTFMT_NV12 1/* NV12 Linear */
+#define VEN_INPUTFMT_NV21 2/* NV21 Linear */
+#define VEN_INPUTFMT_NV12_16M2KA 3/* NV12 Linear */
+
+/*Different allowed rotation modes.*/
+#define VEN_ROTATION_0 1/* 0 degrees */
+#define VEN_ROTATION_90 2/* 90 degrees */
+#define VEN_ROTATION_180 3/* 180 degrees */
+#define VEN_ROTATION_270 4/* 270 degrees */
+
+/*IOCTL timeout values*/
+#define VEN_TIMEOUT_INFINITE 0xffffffff
+
+/*Different allowed intra refresh modes.*/
+#define VEN_IR_OFF 1
+#define VEN_IR_CYCLIC 2
+#define VEN_IR_RANDOM 3
+
+/*IOCTL BASE CODES Not to be used directly by the client.*/
+/* Base value for ioctls that are not related to encoder configuration.*/
+#define VEN_IOCTLBASE_NENC 0x800
+/* Base value for encoder configuration ioctls*/
+#define VEN_IOCTLBASE_ENC 0x850
+
+struct venc_ioctl_msg {
+ void __user *in;
+ void __user *out;
+};
+
+/*NON ENCODER CONFIGURATION IOCTLs*/
+
+/*IOCTL params:SET: InputData - unsigned long, OutputData - NULL*/
+#define VEN_IOCTL_SET_INTF_VERSION \
+ _IOW(VEN_IOCTLBASE_NENC, 0, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_timeout, OutputData - venc_msg*/
+#define VEN_IOCTL_CMD_READ_NEXT_MSG \
+ _IOWR(VEN_IOCTLBASE_NENC, 1, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - NULL, OutputData - NULL*/
+#define VEN_IOCTL_CMD_STOP_READ_MSG _IO(VEN_IOCTLBASE_NENC, 2)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_INPUT_BUFFER_REQ \
+ _IOW(VEN_IOCTLBASE_NENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INPUT_BUFFER_REQ \
+ _IOR(VEN_IOCTLBASE_NENC, 4, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_INPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 5, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_INPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 6, struct venc_ioctl_msg)
+
+/*IOCTL params: CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_FREE_INPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 7, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_allocatorproperty
+ */
+#define VEN_IOCTL_SET_OUTPUT_BUFFER_REQ \
+ _IOW(VEN_IOCTLBASE_NENC, 8, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_OUTPUT_BUFFER_REQ \
+ _IOR(VEN_IOCTLBASE_NENC, 9, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_CMD_ALLOC_OUTPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 10, struct venc_ioctl_msg)
+
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/
+#define VEN_IOCTL_SET_OUTPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 11, struct venc_ioctl_msg)
+
+/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL.*/
+#define VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 12, struct venc_ioctl_msg)
+
+
+/* Asynchronous response message code: VEN_MSG_START */
+#define VEN_IOCTL_CMD_START _IO(VEN_IOCTLBASE_NENC, 13)
+
+
+/*
+ * IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ * Asynchronous response message code: VEN_MSG_INPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_ENCODE_FRAME \
+ _IOW(VEN_IOCTLBASE_NENC, 14, struct venc_ioctl_msg)
+
+
+/*
+ *IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL
+ *Asynchronous response message code:VEN_MSG_OUTPUT_BUFFER_DONE
+ */
+#define VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 15, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:CMD: InputData - venc_bufferflush, OutputData - NULL
+ * Asynchronous response message code: VEN_MSG_FLUSH_INPUT_DONE / VEN_MSG_FLUSH_OUTPUT_DONE
+ */
+#define VEN_IOCTL_CMD_FLUSH \
+ _IOW(VEN_IOCTLBASE_NENC, 16, struct venc_ioctl_msg)
+
+
+/*Asynchronous response message code: VEN_MSG_PAUSE*/
+#define VEN_IOCTL_CMD_PAUSE _IO(VEN_IOCTLBASE_NENC, 17)
+
+/*Asynchronous response message code: VEN_MSG_RESUME*/
+#define VEN_IOCTL_CMD_RESUME _IO(VEN_IOCTLBASE_NENC, 18)
+
+/* Asynchronous response message code: VEN_MSG_STOP*/
+#define VEN_IOCTL_CMD_STOP _IO(VEN_IOCTLBASE_NENC, 19)
+
+#define VEN_IOCTL_SET_RECON_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 20, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_FREE_RECON_BUFFER \
+ _IOW(VEN_IOCTLBASE_NENC, 21, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_RECON_BUFFER_SIZE \
+ _IOW(VEN_IOCTLBASE_NENC, 22, struct venc_ioctl_msg)
+
+
+
+/*ENCODER PROPERTY CONFIGURATION & CAPABILITY IOCTLs*/
+
+/*
+ * IOCTL params:SET: InputData - venc_basecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_basecfg
+ */
+#define VEN_IOCTL_SET_BASE_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 1, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_BASE_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 2, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_LIVE_MODE \
+ _IOW(VEN_IOCTLBASE_ENC, 3, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_LIVE_MODE \
+ _IOR(VEN_IOCTLBASE_ENC, 4, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_profile, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_profile
+ */
+#define VEN_IOCTL_SET_CODEC_PROFILE \
+ _IOW(VEN_IOCTLBASE_ENC, 5, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_CODEC_PROFILE \
+ _IOR(VEN_IOCTLBASE_ENC, 6, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - ven_profilelevel, OutputData - NULL
+ * GET: InputData - NULL, OutputData - ven_profilelevel
+ */
+#define VEN_IOCTL_SET_PROFILE_LEVEL \
+ _IOW(VEN_IOCTLBASE_ENC, 7, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_PROFILE_LEVEL \
+ _IOR(VEN_IOCTLBASE_ENC, 8, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_SHORT_HDR \
+ _IOW(VEN_IOCTLBASE_ENC, 9, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SHORT_HDR \
+ _IOR(VEN_IOCTLBASE_ENC, 10, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params: SET: InputData - venc_sessionqp, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_sessionqp
+ */
+#define VEN_IOCTL_SET_SESSION_QP \
+ _IOW(VEN_IOCTLBASE_ENC, 11, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_SESSION_QP \
+ _IOR(VEN_IOCTLBASE_ENC, 12, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intraperiod, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intraperiod
+ */
+#define VEN_IOCTL_SET_INTRA_PERIOD \
+ _IOW(VEN_IOCTLBASE_ENC, 13, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_PERIOD \
+ _IOR(VEN_IOCTLBASE_ENC, 14, struct venc_ioctl_msg)
+
+
+/* Request an Iframe*/
+#define VEN_IOCTL_CMD_REQUEST_IFRAME _IO(VEN_IOCTLBASE_ENC, 15)
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_capability*/
+#define VEN_IOCTL_GET_CAPABILITY \
+ _IOR(VEN_IOCTLBASE_ENC, 16, struct venc_ioctl_msg)
+
+
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_seqheader*/
+#define VEN_IOCTL_GET_SEQUENCE_HDR \
+ _IOR(VEN_IOCTLBASE_ENC, 17, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_entropycfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_entropycfg
+ */
+#define VEN_IOCTL_SET_ENTROPY_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 18, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ENTROPY_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 19, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_dbcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_dbcfg
+ */
+#define VEN_IOCTL_SET_DEBLOCKING_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 20, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DEBLOCKING_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 21, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_intrarefresh, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_intrarefresh
+ */
+#define VEN_IOCTL_SET_INTRA_REFRESH \
+ _IOW(VEN_IOCTLBASE_ENC, 22, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_INTRA_REFRESH \
+ _IOR(VEN_IOCTLBASE_ENC, 23, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_multiclicecfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_multiclicecfg
+ */
+#define VEN_IOCTL_SET_MULTI_SLICE_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 24, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_MULTI_SLICE_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 25, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_ratectrlcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_ratectrlcfg
+ */
+#define VEN_IOCTL_SET_RATE_CTRL_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 26, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RATE_CTRL_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 27, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_voptimingcfg, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_voptimingcfg
+ */
+#define VEN_IOCTL_SET_VOP_TIMING_CFG \
+ _IOW(VEN_IOCTLBASE_ENC, 28, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_VOP_TIMING_CFG \
+ _IOR(VEN_IOCTLBASE_ENC, 29, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_framerate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_framerate
+ */
+#define VEN_IOCTL_SET_FRAME_RATE \
+ _IOW(VEN_IOCTLBASE_ENC, 30, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_FRAME_RATE \
+ _IOR(VEN_IOCTLBASE_ENC, 31, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_targetbitrate, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_targetbitrate
+ */
+#define VEN_IOCTL_SET_TARGET_BITRATE \
+ _IOW(VEN_IOCTLBASE_ENC, 32, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_TARGET_BITRATE \
+ _IOR(VEN_IOCTLBASE_ENC, 33, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_rotation, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_rotation
+ */
+#define VEN_IOCTL_SET_ROTATION \
+ _IOW(VEN_IOCTLBASE_ENC, 34, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_ROTATION \
+ _IOR(VEN_IOCTLBASE_ENC, 35, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_headerextension, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_headerextension
+ */
+#define VEN_IOCTL_SET_HEC \
+ _IOW(VEN_IOCTLBASE_ENC, 36, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_HEC \
+ _IOR(VEN_IOCTLBASE_ENC, 37, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_DATA_PARTITION \
+ _IOW(VEN_IOCTLBASE_ENC, 38, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_DATA_PARTITION \
+ _IOR(VEN_IOCTLBASE_ENC, 39, struct venc_ioctl_msg)
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_RVLC \
+ _IOW(VEN_IOCTLBASE_ENC, 40, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_RVLC \
+ _IOR(VEN_IOCTLBASE_ENC, 41, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_switch, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_switch
+ */
+#define VEN_IOCTL_SET_AC_PREDICTION \
+ _IOW(VEN_IOCTLBASE_ENC, 42, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_AC_PREDICTION \
+ _IOR(VEN_IOCTLBASE_ENC, 43, struct venc_ioctl_msg)
+
+
+/*
+ * IOCTL params:SET: InputData - venc_qprange, OutputData - NULL
+ * GET: InputData - NULL, OutputData - venc_qprange
+ */
+#define VEN_IOCTL_SET_QP_RANGE \
+ _IOW(VEN_IOCTLBASE_ENC, 44, struct venc_ioctl_msg)
+#define VEN_IOCTL_GET_QP_RANGE \
+ _IOR(VEN_IOCTLBASE_ENC, 45, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_GET_NUMBER_INSTANCES \
+ _IOR(VEN_IOCTLBASE_ENC, 46, struct venc_ioctl_msg)
+
+#define VEN_IOCTL_SET_METABUFFER_MODE \
+ _IOW(VEN_IOCTLBASE_ENC, 47, struct venc_ioctl_msg)
+
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL.*/
+#define VEN_IOCTL_SET_EXTRADATA \
+ _IOW(VEN_IOCTLBASE_ENC, 48, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - unsigned int.*/
+#define VEN_IOCTL_GET_EXTRADATA \
+ _IOR(VEN_IOCTLBASE_ENC, 49, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - NULL, OutputData - NULL.*/
+#define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
+ _IO(VEN_IOCTLBASE_ENC, 50)
+
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+ _IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_range, OutputData - NULL.*/
+#define VEN_IOCTL_SET_CAPABILITY_LTRCOUNT \
+ _IOW(VEN_IOCTLBASE_ENC, 52, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_range.*/
+#define VEN_IOCTL_GET_CAPABILITY_LTRCOUNT \
+ _IOR(VEN_IOCTLBASE_ENC, 53, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmode, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMODE \
+ _IOW(VEN_IOCTLBASE_ENC, 54, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmode.*/
+#define VEN_IOCTL_GET_LTRMODE \
+ _IOR(VEN_IOCTLBASE_ENC, 55, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrcount, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRCOUNT \
+ _IOW(VEN_IOCTLBASE_ENC, 56, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrcount.*/
+#define VEN_IOCTL_GET_LTRCOUNT \
+ _IOR(VEN_IOCTLBASE_ENC, 57, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrperiod, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRPERIOD \
+ _IOW(VEN_IOCTLBASE_ENC, 58, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrperiod.*/
+#define VEN_IOCTL_GET_LTRPERIOD \
+ _IOR(VEN_IOCTLBASE_ENC, 59, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltruse, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRUSE \
+ _IOW(VEN_IOCTLBASE_ENC, 60, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltruse.*/
+#define VEN_IOCTL_GET_LTRUSE \
+ _IOR(VEN_IOCTLBASE_ENC, 61, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmark, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMARK \
+ _IOW(VEN_IOCTLBASE_ENC, 62, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmark.*/
+#define VEN_IOCTL_GET_LTRMARK \
+ _IOR(VEN_IOCTLBASE_ENC, 63, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - unsigned int, OutputData - NULL*/
+#define VEN_IOCTL_SET_SPS_PPS_FOR_IDR \
+ _IOW(VEN_IOCTLBASE_ENC, 64, struct venc_ioctl_msg)
+
+struct venc_range {
+ unsigned long max;
+ unsigned long min;
+ unsigned long step_size;
+};
+
+struct venc_switch {
+ unsigned char status;
+};
+
+struct venc_allocatorproperty {
+ unsigned long mincount;
+ unsigned long maxcount;
+ unsigned long actualcount;
+ unsigned long datasize;
+ unsigned long suffixsize;
+ unsigned long alignment;
+ unsigned long bufpoolid;
+};
+
+struct venc_bufferpayload {
+ unsigned char *pbuffer;
+ size_t sz;
+ int fd;
+ unsigned int offset;
+ unsigned int maped_size;
+ unsigned long filled_len;
+};
+
+struct venc_buffer {
+ unsigned char *ptrbuffer;
+ unsigned long sz;
+ unsigned long len;
+ unsigned long offset;
+ long long timestamp;
+ unsigned long flags;
+ void *clientdata;
+};
+
+struct venc_basecfg {
+ unsigned long input_width;
+ unsigned long input_height;
+ unsigned long dvs_width;
+ unsigned long dvs_height;
+ unsigned long codectype;
+ unsigned long fps_num;
+ unsigned long fps_den;
+ unsigned long targetbitrate;
+ unsigned long inputformat;
+};
+
+struct venc_profile {
+ unsigned long profile;
+};
+struct ven_profilelevel {
+ unsigned long level;
+};
+
+struct venc_sessionqp {
+ unsigned long iframeqp;
+ unsigned long pframqp;
+};
+
+struct venc_qprange {
+ unsigned long maxqp;
+ unsigned long minqp;
+};
+
+struct venc_plusptype {
+ unsigned long plusptype_enable;
+};
+
+struct venc_intraperiod {
+ unsigned long num_pframes;
+ unsigned long num_bframes;
+};
+struct venc_seqheader {
+ unsigned char *hdrbufptr;
+ unsigned long bufsize;
+ unsigned long hdrlen;
+};
+
+struct venc_capability {
+ unsigned long codec_types;
+ unsigned long maxframe_width;
+ unsigned long maxframe_height;
+ unsigned long maxtarget_bitrate;
+ unsigned long maxframe_rate;
+ unsigned long input_formats;
+ unsigned char dvs;
+};
+
+struct venc_entropycfg {
+ unsigned int longentropysel;
+ unsigned long cabacmodel;
+};
+
+struct venc_dbcfg {
+ unsigned long db_mode;
+ unsigned long slicealpha_offset;
+ unsigned long slicebeta_offset;
+};
+
+struct venc_intrarefresh {
+ unsigned long irmode;
+ unsigned long mbcount;
+};
+
+struct venc_multiclicecfg {
+ unsigned long mslice_mode;
+ unsigned long mslice_size;
+};
+
+struct venc_bufferflush {
+ unsigned long flush_mode;
+};
+
+struct venc_ratectrlcfg {
+ unsigned long rcmode;
+};
+
+struct venc_voptimingcfg {
+ unsigned long voptime_resolution;
+};
+struct venc_framerate {
+ unsigned long fps_denominator;
+ unsigned long fps_numerator;
+};
+
+struct venc_targetbitrate {
+ unsigned long target_bitrate;
+};
+
+
+struct venc_rotation {
+ unsigned long rotation;
+};
+
+struct venc_timeout {
+ unsigned long millisec;
+};
+
+struct venc_headerextension {
+ unsigned long header_extension;
+};
+
+struct venc_msg {
+ unsigned long statuscode;
+ unsigned long msgcode;
+ struct venc_buffer buf;
+ unsigned long msgdata_size;
+};
+
+struct venc_recon_addr {
+ unsigned char *pbuffer;
+ unsigned long buffer_size;
+ unsigned long pmem_fd;
+ unsigned long offset;
+};
+
+struct venc_recon_buff_size {
+ int width;
+ int height;
+ int size;
+ int alignment;
+};
+
+struct venc_ltrmode {
+ unsigned long ltr_mode;
+};
+
+struct venc_ltrcount {
+ unsigned long ltr_count;
+};
+
+struct venc_ltrperiod {
+ unsigned long ltr_period;
+};
+
+struct venc_ltruse {
+ unsigned long ltr_id;
+ unsigned long ltr_frames;
+};
+
+#endif /* _UAPI_MSM_VIDC_ENC_H_ */
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 26c9cf4..bd64b1a 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -789,6 +789,7 @@
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
unsigned int nr_isolated = 0;
+ bool first_pass = cluster->nr_not_preferred_cpus;
/*
* Protect against entry being removed (and added at tail) by other
@@ -834,6 +835,7 @@
cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
+again:
/*
* If the number of active CPUs is within the limits, then
* don't force isolation of any busy CPUs.
@@ -853,6 +855,9 @@
if (cluster->active_cpus <= cluster->max_cpus)
break;
+ if (first_pass && !c->not_preferred)
+ continue;
+
spin_unlock_irqrestore(&state_lock, flags);
pr_debug("Trying to isolate CPU%u\n", c->cpu);
@@ -869,6 +874,10 @@
cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
+ if (first_pass && cluster->active_cpus > cluster->max_cpus) {
+ first_pass = false;
+ goto again;
+ }
}
static void __try_to_unisolate(struct cluster_data *cluster,
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 87bea1e..aec86a26 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -133,6 +133,8 @@
if (lowest_mask) {
cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
+ cpumask_andnot(lowest_mask, lowest_mask,
+ cpu_isolated_mask);
if (drop_nopreempts)
drop_nopreempt_cpus(lowest_mask);
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78c433a..fff4170 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6155,7 +6155,8 @@
bool __cpu_overutilized(int cpu, int delta)
{
- return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+ return (capacity_orig_of(cpu) * 1024) <
+ ((cpu_util(cpu) + delta) * capacity_margin);
}
bool cpu_overutilized(int cpu)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7a32e5a..f23f040 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1834,7 +1834,7 @@
if (avoid_prev_cpu && cpu == prev_cpu)
continue;
- if (__cpu_overutilized(cpu, util + tutil))
+ if (__cpu_overutilized(cpu, tutil))
continue;
if (cpu_isolated(cpu))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 61aa3c7..d73dfae 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1861,7 +1861,7 @@
}
#ifdef CONFIG_SCHED_WALT
-extern atomic64_t walt_irq_work_lastq_ws;
+extern u64 walt_load_reported_window;
static inline unsigned long
cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -1899,7 +1899,7 @@
walt_load->prev_window_util = util;
walt_load->nl = nl;
walt_load->pl = pl;
- walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
+ walt_load->ws = walt_load_reported_window;
}
return (util >= capacity) ? capacity : util;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index a2debf9..8bacb6f 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -44,7 +44,8 @@
static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
static bool use_cycle_counter;
DEFINE_MUTEX(cluster_lock);
-atomic64_t walt_irq_work_lastq_ws;
+static atomic64_t walt_irq_work_lastq_ws;
+u64 walt_load_reported_window;
static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;
@@ -866,6 +867,9 @@
rq->window_start = 1;
sync_cpu_available = 1;
atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
+ walt_load_reported_window =
+ atomic64_read(&walt_irq_work_lastq_ws);
+
} else {
struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
@@ -3142,7 +3146,7 @@
raw_spin_lock(&cpu_rq(cpu)->lock);
wc = ktime_get_ns();
-
+ walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
for_each_sched_cluster(cluster) {
u64 aggr_grp_load = 0;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 079d91a..37641d1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -707,6 +707,19 @@
source "lib/Kconfig.kasan"
+config DEBUG_REFCOUNT
+ bool "Verbose refcount checks"
+ help
+ Say Y here if you want reference counters (refcount_t and kref) to
+ generate WARNs on dubious usage. Without this refcount_t will still
+ be a saturating counter and avoid Use-After-Free by turning it into
+ a resource leak Denial-Of-Service.
+
+ Use of this option will increase kernel text size but will alert the
+ admin of potential abuse.
+
+ If in doubt, say "N".
+
endmenu # "Memory Debugging"
config ARCH_HAS_KCOV
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 267db0d..f15f08b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1685,7 +1685,7 @@
match->flags = flags;
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
- atomic_set(&match->sk_ref, 0);
+ refcount_set(&match->sk_ref, 0);
fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
@@ -1702,19 +1702,19 @@
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
- if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+ if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
po->rollover = rollover;
rollover = NULL;
- atomic_inc(&match->sk_ref);
+ refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
}
}
spin_unlock(&po->bind_lock);
- if (err && !atomic_read(&match->sk_ref)) {
+ if (err && !refcount_read(&match->sk_ref)) {
list_del(&match->list);
kfree(match);
}
@@ -1740,7 +1740,7 @@
if (f) {
po->fanout = NULL;
- if (atomic_dec_and_test(&f->sk_ref))
+ if (refcount_dec_and_test(&f->sk_ref))
list_del(&f->list);
else
f = NULL;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc3..e76042f 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -1,6 +1,8 @@
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__
+#include <linux/refcount.h>
+
struct packet_mclist {
struct packet_mclist *next;
int ifindex;
@@ -86,7 +88,7 @@
struct list_head list;
struct sock *arr[PACKET_FANOUT_MAX];
spinlock_t lock;
- atomic_t sk_ref;
+ refcount_t sk_ref;
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index c4ef460..3603c5e 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -578,7 +578,7 @@
LOGE("Failed to to register netdev [%s]", dev->name);
free_netdev(dev);
*new_device = 0;
- rc = RMNET_CONFIG_UNKNOWN_ERROR;
+ return RMNET_CONFIG_UNKNOWN_ERROR;
} else {
rmnet_devices[id] = dev;
*new_device = dev;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 976cba3..f00e7d3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14868,7 +14868,8 @@
if (!ft_event->target_ap)
return;
- msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
+ msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
+ GFP_KERNEL);
if (!msg)
return;
diff --git a/security/Kconfig b/security/Kconfig
index 2e68fa4..638afc8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
source security/keys/Kconfig
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@@ -24,6 +25,7 @@
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_ARCH_QCOM) += pfe/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+ depends on ARCH_QCOM
+
+config PFT
+ bool "Per-File-Tagger driver"
+ depends on SECURITY
+ default n
+ help
+ This driver is used for tagging enterprise files.
+ It is part of the Per-File-Encryption (PFE) feature.
+ The driver is tagging files when created by
+ registered application.
+ Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+ bool "Per-File-Key driver"
+ depends on SECURITY
+ depends on SECURITY_SELINUX
+ default n
+ help
+ This driver is used for storing eCryptfs information
+ in file node.
+ This is part of eCryptfs hardware enhanced solution
+ provided by Qualcomm Technologies, Inc.
+ Information is used when file is encrypted later using
+ ICE or dm crypto engine
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..4096aad
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+#ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..740da32
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purpose :
+ * 1) Define priorities between PFE's if more than one is enabled
+ * 2) Extract key information from inode
+ * 3) Load and manage various keys in ICE HW engine
+ * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER
+ * that need to take decision on HW encryption management of the data
+ * Some examples:
+ * BLOCK LAYER: when it takes decision on whether 2 chunks can be united
+ * to one encryption / decryption request sent to the HW
+ *
+ * UFS DRIVER: when it needs to configure ICE HW with a particular key slot
+ * to be used for encryption / decryption
+ *
+ * PFE variants can differ on particular way of storing the cryptographic info
+ * inside inode, actions to be taken upon file operations, etc., but the common
+ * properties are described above
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_f2fs.h"
+#include "pfk_internal.h"
+//#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+ /* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+ /* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+ pfk_ready = false;
+ pfk_ext4_deinit();
+ pfk_f2fs_deinit();
+ pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+ int ret = 0;
+
+ ret = pfk_ext4_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_f2fs_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_kc_init();
+ if (ret != 0) {
+ pr_err("could init pfk key cache, error %d\n", ret);
+ pfk_ext4_deinit();
+ pfk_f2fs_deinit();
+ goto fail;
+ }
+
+ pfk_ready = true;
+ pr_info("Driver initialized successfully\n");
+
+ return 0;
+
+fail:
+ pr_err("Failed to init driver\n");
+ return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+ if (!inode)
+ return INVALID_PFE;
+
+ if (pfk_is_ext4_type(inode))
+ return EXT4_CRYPT_PFE;
+
+ if (pfk_is_f2fs_type(inode))
+ return F2FS_CRYPT_PFE;
+
+ return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (!inode)
+ return "NULL";
+
+ if (hlist_empty(&inode->i_dentry))
+ return "unknown";
+
+ dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+ return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note, that in general bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+ if (!bio)
+ return NULL;
+ if (!bio_has_data((struct bio *)bio))
+ return NULL;
+ if (!bio->bi_io_vec)
+ return NULL;
+ if (!bio->bi_io_vec->bv_page)
+ return NULL;
+
+ if (PageAnon(bio->bi_io_vec->bv_page)) {
+ struct inode *inode;
+
+ /* Using direct-io (O_DIRECT) without page cache */
+ inode = dio_bio_get_inode((struct bio *)bio);
+ pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+ return inode;
+ }
+
+ if (!page_mapping(bio->bi_io_vec->bv_page))
+ return NULL;
+
+ if (!bio->bi_io_vec->bv_page->mapping->host)
+
+ return NULL;
+
+ return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type)
+{
+ /*
+ * currently only 32 bit key size is supported
+ * in the future, table with supported key sizes might
+ * be introduced
+ */
+
+ if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+ pr_err("not supported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (key_size_type)
+ *key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+ return 0;
+}
+
+/*
+ * Retrieves filesystem type from inode's superblock
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type)
+{
+ if (!inode || !fs_type)
+ return false;
+
+ if (!inode->i_sb)
+ return false;
+
+ if (!inode->i_sb->s_type)
+ return false;
+
+ return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+/**
+ * pfk_get_key_for_bio() - get the encryption key to be used for a bio
+ *
+ * @bio: pointer to the BIO
+ * @key_info: pointer to the key information which will be filled in
+ * @algo_mode: optional pointer to the algorithm identifier which will be set
+ * @is_pfe: will be set to false if the BIO should be left unencrypted
+ *
+ * Return: 0 if a key is being used, otherwise a -errno value
+ */
+static int pfk_get_key_for_bio(const struct bio *bio,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo_mode,
+ bool *is_pfe)
+{
+ const struct inode *inode;
+ enum pfe_type which_pfe;
+ const struct blk_encryption_key *key;
+
+ inode = pfk_bio_get_inode(bio);
+ which_pfe = pfk_get_pfe_type(inode);
+
+ if (which_pfe != INVALID_PFE) {
+ /* Encrypted file; override ->bi_crypt_key */
+ pr_debug("parsing inode %lu with PFE type %d\n",
+ inode->i_ino, which_pfe);
+ return (*(pfk_parse_inode_ftable[which_pfe]))
+ (bio, inode, key_info, algo_mode, is_pfe);
+ }
+
+ /*
+ * bio is not for an encrypted file. Use ->bi_crypt_key if it was set.
+ * Otherwise, don't encrypt/decrypt the bio.
+ */
+ key = bio->bi_crypt_key;
+ if (!key) {
+ *is_pfe = false;
+ return -EINVAL;
+ }
+
+ /* Note: the "salt" is really just the second half of the XTS key. */
+ BUILD_BUG_ON(sizeof(key->raw) !=
+ PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
+ key_info->key = &key->raw[0];
+ key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
+ key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
+ key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
+ if (algo_mode)
+ *algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ return 0;
+}
+
+
+/**
+ * pfk_load_key_start() - loads PFE encryption key to the ICE
+ * Can also be invoked from non
+ * PFE context, in this case it
+ * is not relevant and is_pfe
+ * flag is set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to ice setting structure that will be filled with
+ * ice configuration values, including the index to which the key was loaded
+ * @is_pfe: will be false if inode is not relevant to PFE, in such a case
+ * it should be treated as non PFE by the block layer
+ *
+ * Returns the index where the key is stored in encryption hw and additional
+ * information that will be used later for configuration of the encryption hw.
+ *
+ * Must be followed by pfk_load_key_end when key is no longer used by ice
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe,
+ bool async)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+ enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ enum ice_crpto_key_size key_size_type = 0;
+ u32 key_index = 0;
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ if (!ice_setting) {
+ pr_err("ice setting is NULL\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe);
+
+ if (ret != 0)
+ return ret;
+
+ ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+ if (ret != 0)
+ return ret;
+
+ ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size, &key_index, async);
+ if (ret) {
+ if (ret != -EBUSY && ret != -EAGAIN)
+ pr_err("start: could not load key into pfk key cache, error %d\n",
+ ret);
+
+ return ret;
+ }
+
+ ice_setting->key_size = key_size_type;
+ ice_setting->algo_mode = algo_mode;
+ /* hardcoded for now */
+ ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+ ice_setting->key_index = key_index;
+
+ pr_debug("loaded key for file %s key_index %d\n",
+ inode_to_filename(pfk_bio_get_inode(bio)), key_index);
+
+ return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE
+ * Can also be invoked from non
+ * PFE context, in this case it is not
+ * relevant and is_pfe flag is
+ * set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
+ * from PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /* only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe);
+ if (ret != 0)
+ return ret;
+
+ pfk_kc_load_key_end(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size);
+
+ pr_debug("finished using key for file %s\n",
+ inode_to_filename(pfk_bio_get_inode(bio)));
+
+ return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1: Pointer to first BIO structure.
+ * @bio2: Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or files encrypted with different key.
+ * Also prevent non encrypted and encrypted data from the same file
+ * to be merged (ecryptfs header if stored inside file should be non
+ * encrypted)
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+ const struct blk_encryption_key *key1;
+ const struct blk_encryption_key *key2;
+ const struct inode *inode1;
+ const struct inode *inode2;
+ enum pfe_type which_pfe1;
+ enum pfe_type which_pfe2;
+
+ if (!pfk_is_ready())
+ return false;
+
+ if (!bio1 || !bio2)
+ return false;
+
+ if (bio1 == bio2)
+ return true;
+
+ key1 = bio1->bi_crypt_key;
+ key2 = bio2->bi_crypt_key;
+
+ inode1 = pfk_bio_get_inode(bio1);
+ inode2 = pfk_bio_get_inode(bio2);
+
+ which_pfe1 = pfk_get_pfe_type(inode1);
+ which_pfe2 = pfk_get_pfe_type(inode2);
+
+ /*
+ * If one bio is for an encrypted file and the other is for a different
+ * type of encrypted file or for blocks that are not part of an
+ * encrypted file, do not merge.
+ */
+ if (which_pfe1 != which_pfe2)
+ return false;
+
+ if (which_pfe1 != INVALID_PFE) {
+ /* Both bios are for the same type of encrypted file. */
+ return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+ inode1, inode2);
+ }
+
+ /*
+ * Neither bio is for an encrypted file. Merge only if the default keys
+ * are the same (or both are NULL).
+ */
+ return key1 == key2 ||
+ (key1 && key2 &&
+ !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+/**
+ * Flush key table on storage core reset. During core reset key configuration
+ * is lost in ICE. We need to flush the cache, so that the keys will be
+ * reconfigured again for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+ if (!pfk_is_ready())
+ return;
+
+ pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..0eb1225
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with EXT4 crypt extension
+ *
+ * The key information is stored in node by EXT4 when file is first opened
+ * and will be later accessed by Block Device Driver to actually load the key
+ * to encryption hw.
+ *
+ * PFK exposes API's for loading and removing keys from encryption hw
+ * and also API to determine whether 2 adjacent blocks can be aggregated by
+ * Block Layer in one request to encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+//#include "ext4_ice.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+ pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+ pfk_ext4_ready = true;
+ pr_info("PFK EXT4 inited successfully\n");
+
+ return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+ return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ * struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ * pr_debug("dumping inode with address 0x%p\n", inode);
+ * pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ * pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ * ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ * if (ci) {
+ * pr_debug("crypt_info address 0x%p\n", ci);
+ * pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ * } else {
+ * pr_debug("crypt_info is NULL\n");
+ * }
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+ if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+ return false;
+
+ return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+ enum ice_cryto_algo_mode *algo)
+{
+ /*
+ * currently only AES XTS algo is supported
+ * in the future, table with supported ciphers might
+ * be introduced
+ */
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!fscrypt_is_aes_xts_cipher(inode)) {
+ pr_err("ext4 algorithm is not supported by pfk\n");
+ return -EINVAL;
+ }
+
+ if (algo)
+ *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+ return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_ext4_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = fscrypt_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from ext4\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_ext4_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ /* if there is no ext4 pfk, don't disallow merging blocks */
+ if (!pfk_ext4_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+}
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..c33232f
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
new file mode 100644
index 0000000..8b9d515
--- /dev/null
+++ b/security/pfe/pfk_f2fs.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with EXT4/F2FS crypt extension
+ *
+ * The key information is stored in node by EXT4/F2FS when file is first opened
+ * and will be later accessed by Block Device Driver to actually load the key
+ * to encryption hw.
+ *
+ * PFK exposes API's for loading and removing keys from encryption hw
+ * and also API to determine whether 2 adjacent blocks can be aggregated by
+ * Block Layer in one request to encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_f2fs [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+ pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+ pfk_f2fs_ready = true;
+ pr_info("PFK F2FS inited successfully\n");
+
+ return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_f2fs_is_ready(void)
+{
+ return pfk_f2fs_ready;
+}
+
+/**
+ * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_f2fs_type(const struct inode *inode)
+{
+ if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
+ return false;
+
+ return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_f2fs_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported cipher)
+ */
+static int pfk_f2fs_parse_cipher(const struct inode *inode,
+ enum ice_cryto_algo_mode *algo)
+{
+ /*
+ * currently only AES XTS algo is supported
+ * in the future, table with supported ciphers might
+ * be introduced
+ */
+ if (!inode)
+ return -EINVAL;
+
+ if (!fscrypt_is_aes_xts_cipher(inode)) {
+ pr_err("f2fs algorithm is not supported by pfk\n");
+ return -EINVAL;
+ }
+
+ if (algo)
+ *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+ return 0;
+}
+
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_f2fs_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = fscrypt_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from f2fs\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_f2fs_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ bool mergeable;
+
+ /* if there is no f2fs pfk, don't disallow merging blocks */
+ if (!pfk_f2fs_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+ if (!mergeable)
+ return false;
+
+
+ /* ICE allows only consecutive iv_key stream. */
+ if (!bio_dun(bio1) && !bio_dun(bio2))
+ return true;
+ else if (!bio_dun(bio1) || !bio_dun(bio2))
+ return false;
+
+ return bio_end_dun(bio1) == bio_dun(bio2);
+}
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
new file mode 100644
index 0000000..551d529
--- /dev/null
+++ b/security/pfe/pfk_f2fs.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_F2FS_H_
+#define _PFK_F2FS_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_f2fs_type(const struct inode *inode);
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_f2fs_init(void);
+
+void pfk_f2fs_deinit(void);
+
+#endif /* _PFK_F2FS_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..16ed516
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+ TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1( \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+static uint8_t ice_key[ICE_KEY_SIZE];
+static uint8_t ice_salt[ICE_SALT_SIZE];
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type)
+{
+ struct scm_desc desc = {0};
+ int ret, ret1;
+ char *tzbuf_key = (char *)ice_key;
+ char *tzbuf_salt = (char *)ice_salt;
+ char *s_type = storage_type;
+
+ uint32_t smc_id = 0;
+ u32 tzbuflen_key = sizeof(ice_key);
+ u32 tzbuflen_salt = sizeof(ice_salt);
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+ if (!key || !salt) {
+ pr_err("%s Invalid key/salt\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!tzbuf_key || !tzbuf_salt) {
+ pr_err("%s No Memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (s_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(tzbuf_key, 0, tzbuflen_key);
+ memset(tzbuf_salt, 0, tzbuflen_salt);
+
+ memcpy(ice_key, key, tzbuflen_key);
+ memcpy(ice_salt, salt, tzbuflen_salt);
+
+ dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+ dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+ smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+ desc.args[1] = virt_to_phys(tzbuf_key);
+ desc.args[2] = tzbuflen_key;
+ desc.args[3] = virt_to_phys(tzbuf_salt);
+ desc.args[4] = tzbuflen_salt;
+
+ ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+ if (ret) {
+ pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = scm_call2(smc_id, &desc);
+
+ if (ret) {
+ pr_err("%s: Set Key Error: %d\n", __func__, ret);
+ if (ret == -EBUSY) {
+ if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+ pr_err("%s: clock disable failed\n", __func__);
+ goto out;
+ }
+ /* Try to invalidate the key to keep ICE in proper state */
+ smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+ desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+ ret1 = scm_call2(smc_id, &desc);
+ if (ret1)
+ pr_err("%s: Invalidate Key Error: %d\n", __func__,
+ ret1);
+ goto out;
+ }
+ ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+
+out:
+ return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+ struct scm_desc desc = {0};
+ int ret;
+
+ uint32_t smc_id = 0;
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+
+ if (storage_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+
+ ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+ if (ret) {
+ pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+ return ret;
+ }
+
+ ret = scm_call2(smc_id, &desc);
+
+ if (ret) {
+ pr_err("%s: Error: 0x%x\n", __func__, ret);
+ if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+ pr_err("%s: could not disable clocks\n", __func__);
+ } else {
+ ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+ }
+
+ return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..31772e7
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..3214327
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+ const unsigned char *key;
+ const unsigned char *salt;
+ size_t key_size;
+ size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..eecc026
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on last usage timestamp, i.e
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE: entry is free
+ * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
+ and cannot be used by others. SCM call
+ to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
+ cannot be used by others. SCM call to load the
+ key to ICE was successfully executed and key is
+ now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ and cannot be used by others until invalidation
+ is complete
+ * @INACTIVE: entry's key is already loaded, but is not
+ currently being used. It can be re-used for
+ optimization and to avoid SCM call cost or
+ it can be taken by another key if there are
+ no FREE entries
+ * @SCM_ERROR: error occurred while scm call was performed to
+ load the key to ICE
+ */
+enum pfk_kc_entry_state {
+ FREE,
+ ACTIVE_ICE_PRELOAD,
+ ACTIVE_ICE_LOADED,
+ INACTIVE_INVALIDATING,
+ INACTIVE,
+ SCM_ERROR
+};
+
+struct kc_entry {
+ unsigned char key[PFK_MAX_KEY_SIZE];
+ size_t key_size;
+
+ unsigned char salt[PFK_MAX_SALT_SIZE];
+ size_t salt_size;
+
+ u64 time_stamp;
+ u32 key_index;
+
+ struct task_struct *thread_pending;
+
+ enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
+ int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+ return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+ spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+ spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise or if invalid
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+ if (!entry)
+ return false;
+
+ return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+ int res = 0;
+
+ while (!kc_entry_is_available(entry)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ res = -ERESTARTSYS;
+ break;
+ }
+ /* assuming only one thread can try to invalidate
+ * the same entry
+ */
+ entry->thread_pending = current;
+ kc_spin_unlock();
+ schedule();
+ kc_spin_lock();
+ }
+ set_current_state(TASK_RUNNING);
+
+ return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ * INACTIVE_INVALIDATING
+ * If entry is in use, waits till
+ * it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+ int res;
+
+ res = kc_entry_wait_till_available(entry);
+ if (res)
+ return res;
+
+ entry->state = INACTIVE_INVALIDATING;
+
+ return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves entry to state FREE
+ * wakes up all the tasks waiting
+ * on it
+ *
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->state != INACTIVE_INVALIDATING)
+ return;
+
+ entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry which timestamp is the minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+ struct kc_entry *b)
+{
+ if (!a)
+ return b;
+
+ if (time_before64(b->time_stamp, a->time_stamp))
+ return b;
+
+ return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+ return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start search with, if entry found, updated with
+ * index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ int *starting_index)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (salt != NULL) {
+ if (entry->salt_size != salt_size)
+ continue;
+
+ if (memcmp(entry->salt, salt, salt_size) != 0)
+ continue;
+ }
+
+ if (entry->key_size != key_size)
+ continue;
+
+ if (memcmp(entry->key, key, key_size) == 0) {
+ *starting_index = i;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ int index = 0;
+
+ return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+ struct kc_entry *curr_min_entry = NULL;
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (entry->state == FREE)
+ return entry;
+
+ if (entry->state == INACTIVE)
+ curr_min_entry = kc_min_entry(curr_min_entry, entry);
+ }
+
+ return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ memset(entry->key, 0, entry->key_size);
+ memset(entry->salt, 0, entry->salt_size);
+
+ entry->key_size = 0;
+ entry->salt_size = 0;
+
+ entry->time_stamp = 0;
+ entry->scm_error = 0;
+
+ entry->state = FREE;
+
+ entry->loaded_ref_cnt = 0;
+ entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ * loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+ int ret;
+
+ kc_clear_entry(entry);
+
+ memcpy(entry->key, key, key_size);
+ entry->key_size = key_size;
+
+ memcpy(entry->salt, salt, salt_size);
+ entry->salt_size = salt_size;
+
+ /* Mark entry as no longer free before releasing the lock */
+ entry->state = ACTIVE_ICE_PRELOAD;
+ kc_spin_unlock();
+
+ ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+ entry->salt, s_type);
+
+ kc_spin_lock();
+ return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+ int i = 0;
+ struct kc_entry *entry = NULL;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ entry->key_index = PFK_KC_STARTING_INDEX + i;
+ }
+ kc_ready = true;
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+ int res = pfk_kc_clear();
+ kc_ready = false;
+
+ return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: whether scm calls are allowed in the caller context
+ *
+ * If key is present in cache, then the key_index will be retrieved from cache.
+ * If it is not present, the oldest entry from kc table will be evicted,
+ * the key will be loaded to ICE via QSEE to the index that is the evicted
+ * entry number and stored in cache.
+ * Entry that is going to be used is marked as being used, it will mark
+ * as not being used when ICE finishes using it and pfk_kc_load_key_end
+ * will be invoked.
+ * As QSEE calls can only be done from a non-atomic context, when @async flag
+ * is set to 'false', it specifies that it is ok to make the calls in the
+ * current context. Otherwise, when @async is set, the caller should retry the
+ * call again from a different context, and -EAGAIN error will be returned.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async)
+{
+ int ret = 0;
+ struct kc_entry *entry = NULL;
+ bool entry_exists = false;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key || !salt || !key_index) {
+ pr_err("%s key/salt/key_index NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (key_size != PFK_KC_KEY_SIZE) {
+ pr_err("unsupported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (salt_size != PFK_KC_SALT_SIZE) {
+ pr_err("unsupported salt size %zu\n", salt_size);
+ return -EINVAL;
+ }
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ if (async) {
+ pr_debug("%s task will populate entry\n", __func__);
+ kc_spin_unlock();
+ return -EAGAIN;
+ }
+
+ entry = kc_find_oldest_entry_non_locked();
+ if (!entry) {
+ /* could not find a single non locked entry,
+ * return EBUSY to upper layers so that the
+ * request will be rescheduled
+ */
+ kc_spin_unlock();
+ return -EBUSY;
+ }
+ } else {
+ entry_exists = true;
+ }
+
+ pr_debug("entry with index %d is in state %d\n",
+ entry->key_index, entry->state);
+
+ switch (entry->state) {
+ case (INACTIVE):
+ if (entry_exists) {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ }
+ case (FREE):
+ ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+ if (ret) {
+ entry->state = SCM_ERROR;
+ entry->scm_error = ret;
+ pr_err("%s: key load error (%d)\n", __func__, ret);
+ } else {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+ * In case of UFS only increase ref cnt for async calls,
+ * sync calls from within work thread do not pass
+ * requests further to HW
+ */
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ }
+ break;
+ case (ACTIVE_ICE_PRELOAD):
+ case (INACTIVE_INVALIDATING):
+ ret = -EAGAIN;
+ break;
+ case (ACTIVE_ICE_LOADED):
+ kc_update_timestamp(entry);
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ case(SCM_ERROR):
+ ret = entry->scm_error;
+ kc_clear_entry(entry);
+ entry->state = FREE;
+ break;
+ default:
+ pr_err("invalid state %d for entry with key index %d\n",
+ entry->state, entry->key_index);
+ ret = -EINVAL;
+ }
+
+ *key_index = entry->key_index;
+ kc_spin_unlock();
+
+ return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ * by pfk_kc_load_key_start, by marking the entry as
+ * not being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ if (!key || !salt)
+ return;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ kc_spin_unlock();
+ pr_err("internal error, there should an entry to unlock\n");
+
+ return;
+ }
+ /* NOTE(review): an underflow here is only logged, not corrected */
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
+
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ /* snapshot and clear under the lock, wake after
+ * dropping it to avoid waking while holding a spinlock
+ */
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
+ }
+
+ kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE
+ * engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (!salt)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return -EINVAL;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ pr_debug("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+ kc_clear_entry(entry);
+
+ kc_spin_unlock();
+
+ /* NOTE(review): entry->key_index is read after the lock is dropped;
+ * this assumes kc_clear_entry() preserves key_index and that the
+ * INACTIVE_INVALIDATING state prevents concurrent reuse — confirm
+ */
+ qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+ kc_spin_lock();
+ kc_entry_finish_invalidating(entry);
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Will only search key part, if there are several,
+ * all will be removed
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+ struct kc_entry *entry = NULL;
+ int index = 0;
+ int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+ int temp_indexes_size = 0;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ /* -1 (all bytes 0xFF) marks unused slots in the index list */
+ memset(temp_indexes, -1, sizeof(temp_indexes));
+
+ kc_spin_lock();
+
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry) {
+ pr_err("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+ kc_clear_entry(entry);
+
+ /* let's clean additional entries with the same key if there are any */
+ do {
+ index++;
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry)
+ break;
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+
+ kc_clear_entry(entry);
+
+
+ } while (true);
+
+ kc_spin_unlock();
+
+ /* success path: step size back so the loops below walk
+ * indices temp_indexes_size-1 .. 0
+ */
+ temp_indexes_size--;
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ qti_pfk_ice_invalidate_key(
+ kc_entry_at_index(temp_indexes[i])->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+
+out:
+ /* NOTE(review): on the error path (goto out) temp_indexes_size was
+ * NOT decremented, so the first iteration reads
+ * temp_indexes[temp_indexes_size] == -1 and passes it to
+ * kc_entry_at_index() — likely out-of-bounds; confirm and fix
+ */
+ kc_spin_lock();
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ kc_entry_finish_invalidating(
+ kc_entry_at_index(temp_indexes[i]));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+
+ /* invalidate hw slots outside the spinlock (SCM is non-atomic) */
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+out:
+ /* NOTE(review): on early failure this also "finishes" entries that
+ * never started invalidating; assumes kc_entry_finish_invalidating()
+ * is a safe no-op for those states — confirm
+ */
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ kc_entry_finish_invalidating(kc_entry_at_index(i));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the table without removing keys from ICE
+ * The assumption is that at this point we don't have any pending transactions
+ * Also, there is no need to clear keys from ICE
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+}
+
+/* Parse "androidboot.bootdevice=" from the kernel command line and point
+ * *device at PFK_UFS when the boot device is UFS; otherwise *device is
+ * left untouched. Returns 0 when the token was found, -EINVAL otherwise.
+ */
+static int pfk_kc_find_storage_type(char **device)
+{
+ char boot[20] = {'\0'};
+ char *match = (char *)strnstr(saved_command_line,
+ "androidboot.bootdevice=",
+ strlen(saved_command_line));
+ if (match) {
+ /* NOTE(review): copies sizeof(boot)-1 bytes unconditionally;
+ * if the token sits near the end of saved_command_line this
+ * reads past the end of the string — consider bounding the
+ * copy by the remaining length. boot[] is zero-initialized,
+ * so the result is always NUL-terminated.
+ */
+ memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+ sizeof(boot) - 1);
+ if (strnstr(boot, PFK_UFS, strlen(boot)))
+ *device = PFK_UFS;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Module init: detect the boot storage type from the kernel command line
+ * so the correct storage-type string is passed to the ICE/QSEE calls.
+ */
+static int __init pfk_kc_pre_init(void)
+{
+ return pfk_kc_find_storage_type(&s_type);
+}
+
+/* Module exit: drop the cached storage-type pointer */
+static void __exit pfk_kc_exit(void)
+{
+ s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..6adeee2
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index e43c50c..35c8dce 100644
--- a/security/security.c
+++ b/security/security.c
@@ -525,6 +525,14 @@
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
@@ -1700,6 +1708,8 @@
.inode_init_security =
LIST_HEAD_INIT(security_hook_heads.inode_init_security),
.inode_create = LIST_HEAD_INIT(security_hook_heads.inode_create),
+ .inode_post_create =
+ LIST_HEAD_INIT(security_hook_heads.inode_post_create),
.inode_link = LIST_HEAD_INIT(security_hook_heads.inode_link),
.inode_unlink = LIST_HEAD_INIT(security_hook_heads.inode_unlink),
.inode_symlink =
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 52f3c55..84d9a2e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,26 +348,27 @@
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->dontaudit)
goto error;
}
@@ -395,7 +396,8 @@
{
struct avc_xperms_node *xp_node;
- xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -548,7 +550,7 @@
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!node)
goto out;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8e12ffe..5f3fa60 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1092,10 +1092,9 @@
goto out_err;
opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
- if (!opts->mnt_opts_flags) {
- kfree(opts->mnt_opts);
+ if (!opts->mnt_opts_flags)
goto out_err;
- }
+
if (fscontext) {
opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1118,6 +1117,7 @@
return 0;
out_err:
+ security_free_mnt_opts(opts);
kfree(context);
kfree(defcontext);
kfree(fscontext);
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 43535cd..60cdcf4 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
#include <linux/in.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
struct task_security_struct {
u32 osid; /* SID prior to last execve */
@@ -52,6 +53,8 @@
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
+ u32 tag; /* Per-File-Encryption tag */
+ void *pfk_data; /* Per-File-Key data from ecryptfs */
struct mutex lock;
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
#include <linux/dcache.h>
#include <linux/magic.h>
#include <linux/types.h>
-#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9b517a4..a6b0970 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1434,7 +1434,7 @@
scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
- context.len = scontext_len;
+ context.len = strlen(str) + 1;
str = NULL;
} else if (rc)
goto out_unlock;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 5143801..180261d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -976,9 +976,9 @@
struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long appl_ptr;
- spin_lock_irqsave(&runtime->lock, flags);
if (userbuf)
mutex_lock(&runtime->realloc_mutex);
+ spin_lock_irqsave(&runtime->lock, flags);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)