Merge "defconfig: Enable EUD driver for Kona"
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 923d145..d9365c0 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -265,6 +265,12 @@
DSPP block, the AD offsets must be offset from the
corresponding DSPP base.
- qcom,sde-dspp-ad-version A u32 value indicating the version of the AD hardware
+- qcom,sde-dspp-ltm-version A u32 value indicating the major (upper 16 bits) and minor (lower 16 bits)
+				version of the LTM hardware
+- qcom,sde-dspp-ltm-off:	Array of u32 offsets indicating the LTM block offsets from the
+				DSPP offsets. Since the LTM hardware is represented as part of the
+				DSPP block, the LTM offsets are calculated based on the
+				corresponding DSPP base.
- qcom,sde-vbif-id: Array of vbif ids corresponding to the
offsets defined in property: qcom,sde-vbif-off.
- qcom,sde-vbif-default-ot-rd-limit: A u32 value indicates the default read OT limit
diff --git a/Documentation/devicetree/bindings/input/qpnp-power-on.txt b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
new file mode 100644
index 0000000..6789c62
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
@@ -0,0 +1,241 @@
+Qualcomm Technologies, Inc. QPNP Power-on PMIC Peripheral Device Tree Bindings
+
+qpnp-power-on devices support the power-on (PON) peripheral of Qualcomm
+Technologies, Inc. PMICs. The supported functionality includes power on/off
+reason, key press/release detection, PMIC reset configurations and other PON
+specific features. The PON module supports multiple physical power-on (KPDPWR_N,
+CBLPWR) and reset (KPDPWR_N, RESIN, KPDPWR+RESIN) sources. This peripheral is
+connected to the host processor via the SPMI interface.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-power-on"
+- reg: Specifies the SPMI address and size for this PON
+ (power-on) peripheral.
+
+Optional properties:
+- interrupts: Specifies the interrupts associated with PON.
+- interrupt-names: Specifies the interrupt names associated with
+ the interrupts property. Must be a subset of
+ "kpdpwr", "kpdpwr-bark", "resin", "resin-bark",
+ "cblpwr", "kpdpwr-resin-bark", and
+ "pmic-wd-bark". Bark interrupts are associated
+ with system reset configuration to allow default
+ reset configuration to be activated. If system
+ reset configuration is not supported then bark
+ interrupts are nops. Additionally, the
+ "pmic-wd-bark" interrupt can be added if the
+ system needs to handle PMIC watchdog barks.
+- qcom,pon-dbc-delay: The debounce delay for the power-key interrupt
+ specified in us.
+ Possible values for GEN1 PON are:
+ 15625, 31250, 62500, 125000, 250000, 500000,
+ 1000000 and 2000000.
+ Possible values for GEN2 PON are:
+ 62, 123, 245, 489, 977, 1954, 3907, 7813,
+ 15625, 31250, 62500, 125000 and 250000.
+			Intermediate values are rounded down to the
+			nearest valid value.
+- qcom,system-reset: Boolean which specifies that this PON peripheral
+ can be used to reset the system. This property
+ can only be used by one device on the system. It
+ is an error to include it more than once.
+- qcom,s3-debounce: The debounce delay for stage 3 reset trigger in
+ secs. The values range from 0 to 128.
+- qcom,s3-src: The source for stage 3 reset. It can be one of
+ "kpdpwr", "resin", "kpdpwr-or-resin" or
+ "kpdpwr-and-resin".
+- qcom,uvlo-panic: Boolean indicating that the device should
+ trigger a controlled panic shutdown if a restart
+ was caused by under voltage lock-out (UVLO).
+- qcom,clear-warm-reset: Boolean which specifies that the WARM_RESET
+ reason registers need to be cleared for this
+ target. The property is used for the targets
+ which have a hardware feature to catch resets
+ which aren't triggered by the application
+			processor. In such cases, clearing the WARM_RESET
+			reason registers across processor resets keeps the
+			registers in a useful state.
+- qcom,secondary-pon-reset: Boolean property which indicates that the PON
+ peripheral is a secondary PON device which
+ needs to be configured during reset in addition
+ to the primary PON device that is configured
+ for system reset through qcom,system-reset
+ property.
+ This should not be defined along with the
+ qcom,system-reset property.
+- qcom,store-hard-reset-reason: Boolean property which if set will store the
+ hardware reset reason to SOFT_RB_SPARE register
+ of the core PMIC PON peripheral.
+- qcom,warm-reset-poweroff-type: Poweroff type required to be configured
+ on PS_HOLD reset control register when the
+ system goes for warm reset. If this property is
+ not specified, then the default type, warm reset
+ will be configured to PS_HOLD reset control
+ register.
+ Supported values: PON_POWER_OFF_TYPE_* found in
+ include/dt-bindings/input/qcom,qpnp-power-on.h
+- qcom,hard-reset-poweroff-type: Same description as
+ qcom,warm-reset-poweroff-type but this applies
+ for the system hard reset case.
+- qcom,shutdown-poweroff-type: Same description as
+				qcom,warm-reset-poweroff-type but this applies
+				for the system shutdown case.
+- qcom,kpdpwr-sw-debounce: Boolean property to enable the debounce logic
+ on the KPDPWR_N rising edge.
+- qcom,resin-pon-reset: Boolean property which indicates that resin
+ needs to be configured during reset in addition
+ to the primary PON device that is configured
+ for system reset through qcom,system-reset
+ property.
+- qcom,resin-warm-reset-type: Poweroff type required to be configured on
+ RESIN reset control register when the system
+ initiates warm reset. If this property is not
+ specified, then the default type, warm reset
+ will be configured to RESIN reset control
+ register. This property is effective only if
+ qcom,resin-pon-reset is defined.
+ Supported values: PON_POWER_OFF_TYPE_* found in
+ include/dt-bindings/input/qcom,qpnp-power-on.h
+- qcom,resin-hard-reset-type: Same description as qcom,resin-warm-reset-type
+ but this applies for the system hard reset case.
+- qcom,resin-shutdown-type: Same description as qcom,resin-warm-reset-type
+ but this applies for the system shutdown case.
+- qcom,resin-shutdown-disable: Boolean property to disable RESIN power off
+ trigger during system shutdown case.
+ This property is effective only if
+ qcom,resin-pon-reset is defined.
+- qcom,resin-hard-reset-disable: Boolean property to disable RESIN power
+ off trigger during system hard reset case.
+ This property is effective only if
+ qcom,resin-pon-reset is defined.
+- qcom,ps-hold-shutdown-disable: Boolean property to disable PS_HOLD
+ power off trigger during system shutdown case.
+- qcom,ps-hold-hard-reset-disable: Boolean property to disable PS_HOLD
+ power off trigger during system hard reset case.
+
+Optional Sub-nodes:
+- qcom,pon_1 ... qcom,pon_n: These PON child nodes correspond to features
+ supported by the PON peripheral including reset
+ configurations, pushbutton keys, and regulators.
+
+Sub-node properties:
+
+Sub-nodes (if defined) should belong to either a PON configuration or a
+regulator configuration.
+
+Regulator sub-node required properties:
+- regulator-name: Regulator name for the PON regulator that is
+ being configured.
+- qcom,pon-spare-reg-addr: Register offset from the base address of the
+ PON peripheral that needs to be configured for
+ the regulator being controlled.
+- qcom,pon-spare-reg-bit: Bit position in the specified register that
+ needs to be configured for the regulator being
+ controlled.
+
+PON sub-node required properties:
+- qcom,pon-type: The type of PON/RESET source. Supported values:
+ 0 = KPDPWR
+ 1 = RESIN
+ 2 = CBLPWR
+ 3 = KPDPWR_RESIN
+ These values are PON_POWER_ON_TYPE_* found in
+ include/dt-bindings/input/qcom,qpnp-power-on.h
+
+PON sub-node optional properties:
+- qcom,pull-up: Boolean flag indicating if a pull-up resistor
+ should be enabled for the input.
+- qcom,support-reset: Indicates if this PON source supports
+ reset functionality.
+ 0 = Not supported
+ 1 = Supported
+ If this property is not defined, then default S2
+ reset configurations should not be modified.
+- qcom,use-bark: Specify if this PON type needs to handle a bark
+ interrupt.
+- linux,code: The input key-code associated with the reset
+ source. The reset source in its default
+ configuration can be used to support standard
+ keys.
+
+The properties mentioned below are required only when the qcom,support-reset DT
+property is defined and is set to 1.
+
+- qcom,s1-timer: The debounce timer for the BARK interrupt for
+ the reset source. Value is specified in ms.
+ Supported values are:
+ 0, 32, 56, 80, 128, 184, 272, 408, 608, 904,
+ 1352, 2048, 3072, 4480, 6720, 10256
+- qcom,s2-timer: The debounce timer for the S2 reset specified
+ in ms. On the expiry of this timer, the PMIC
+ executes the reset sequence.
+ Supported values are:
+ 0, 10, 50, 100, 250, 500, 1000, 2000
+- qcom,s2-type: The type of reset associated with this source.
+ Supported values:
+ 0 = SOFT_RESET (legacy)
+ 1 = WARM_RESET
+ 4 = SHUTDOWN
+ 5 = DVDD_SHUTDOWN
+ 7 = HARD_RESET
+ 8 = DVDD_HARD_RESET
+ These values are PON_POWER_OFF_TYPE_* found in
+ include/dt-bindings/input/qcom,qpnp-power-on.h
+
+Examples:
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x8 0x4 IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x8 0x5 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "kpdpwr", "resin", "resin-bark",
+ "kpdpwr-resin-bark";
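+		/* power-key debounce of 15625 us (15.625 ms) */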
+ qcom,pon-dbc-delay = <15625>;
+ qcom,system-reset;
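+		/* stage 3 reset debounce, in seconds */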
+ qcom,s3-debounce = <32>;
+ qcom,s3-src = "resin";
+ qcom,clear-warm-reset;
+ qcom,store-hard-reset-reason;
+
+ qcom,pon_1 {
+ qcom,pon-type = <PON_POWER_ON_TYPE_KPDPWR>;
+ qcom,pull-up;
+ linux,code = <KEY_POWER>;
+ };
+
+ qcom,pon_2 {
+ qcom,pon-type = <PON_POWER_ON_TYPE_RESIN>;
+ qcom,support-reset = <1>;
+ qcom,pull-up;
+ qcom,s1-timer = <0>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <PON_POWER_OFF_TYPE_WARM_RESET>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ qcom,use-bark;
+ };
+
+ qcom,pon_3 {
+ qcom,pon-type = <PON_POWER_ON_TYPE_KPDPWR_RESIN>;
+ qcom,support-reset = <1>;
+ qcom,s1-timer = <6720>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <PON_POWER_OFF_TYPE_HARD_RESET>;
+ qcom,pull-up;
+ qcom,use-bark;
+ };
+ };
+
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ qcom,secondary-pon-reset;
+ qcom,hard-reset-poweroff-type = <PON_POWER_OFF_TYPE_SHUTDOWN>;
+
+ pon_perph_reg: qcom,pon_perph_reg {
+ regulator-name = "pon_spare_reg";
+ qcom,pon-spare-reg-addr = <0x8c>;
+ qcom,pon-spare-reg-bit = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/qti-haptics.txt b/Documentation/devicetree/bindings/input/qti-haptics.txt
new file mode 100644
index 0000000..b3daa49
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qti-haptics.txt
@@ -0,0 +1,211 @@
+Qualcomm Technologies, Inc. Haptics driver
+
+The haptics peripheral in QTI PMICs can support different types of actuators or
+vibrators:
+ 1) Eccentric Rotation Mass (ERM);
+ 2) Linear Resonant Actuator (LRA).
+This binding document describes the properties for this module.
+
+Properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: It can be one of the following:
+ "qcom,haptics",
+ "qcom,pm660-haptics",
+ "qcom,pm8150b-haptics".
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Base address of haptics peripheral.
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: Interrupt names. This list must match up 1-to-1 with the
+ interrupts specified in the 'interrupts' property. Following
+		  interrupts are required: "hap-play-irq", "hap-sc-irq".
+
+- qcom,actuator-type
+ Usage: optional
+ Value type: <string>
+ Definition: Specifies the type of the actuator connected on the output of
+ haptics module. Allowed values: "erm", "lra". If this is
+ not specified, then LRA type will be used by default.
+
+- qcom,vmax-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum allowed output voltage in millivolts
+ for the actuator. Value specified here will be rounded
+ off to the closest multiple of 116 mV. Allowed values:
+ 0 to 3596. If this is not specified, then 1800 mV will be
+ used by default.
+
+- qcom,ilim-ma
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum allowed output current in mA for the
+ actuator. Allowed values: 400 or 800. If this is not
+ specified, 400 mA will be used by default.
+
+- qcom,play-rate-us
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the period at which each sample of the 8-byte waveform
+ registers is played. For ERM, this period is flexible and it
+ can be chosen based on the desired shape of the pattern.
+ For LRA, it should be set equal to the resonance period
+ specified in the LRA actuator datasheet. Allowed values are:
+ 0 to 20475. If this is not specified, 5715us play rate is used.
+
+- qcom,external-waveform-source
+ Usage: optional
+ Value type: <string>
+	Definition: The haptics module can play with a constant internal Vmax
+		  strength or with patterns specified in its internal 8-byte
+		  waveform buffer. It can also play the audio LINE-IN signal
+		  or a PWM waveform coming from the LINE-IN/PWM pin.
+		  This property specifies the kind of waveform source
+		  on the LINE-IN/PWM pin. Allowed values are: "audio", "pwm".
+		  If this is not specified, internal signals (Vmax or buffer)
+		  will be selected according to the requirement of the playing
+		  waveforms.
+
+- vdd-supply
+ Usage: optional
+ Value type: <phandle>
+ Definition: Specifies the phandle of the regulator device which supplies
+ haptics module through VDD_HAP pin. This is only needed if VDD_HAP
+ is supplied from an external boost regulator instead of VPH_PWR.
+
+Following properties are specific only when LRA actuator is used:
+
+- qcom,lra-resonance-sig-shape
+ Usage: optional
+ Value type: <string>
+	Definition: Specifies the shape of the LRA resonance drive signal. Allowed
+		  values: "sine", "square". If this is not specified, a sinusoidal
+		  resonance drive signal is used.
+
+- qcom,lra-allow-variable-play-rate
+ Usage: optional
+ Value type: <empty>
+	Definition: If specified, the "qcom,wf-play-rate-us" defined in each
+		  effect for an LRA may differ from the resonance period of the
+		  LRA actuator.
+
+- qcom,lra-auto-resonance-mode
+ Usage: optional
+ Value type: <string>
+ Definition: Specifies the auto resonance technique for LRA. Allowed values are:
+ "zxd": zero crossing based discontinuous method;
+ "qwd": quarter wave drive method;
+
+The following properties can be specified in child nodes to define vibration
+waveforms/effects:
+
+- qcom,effect-id
+ Usage: required
+ Value type: <u32>
+	Definition: Specifies the effect ID that the client can request to play the
+		  corresponding waveform defined in this child node. The ID is
+		  normally defined and sent from userspace for certain user
+		  notification events.
+
+- qcom,wf-pattern
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Specifies the waveform pattern in a byte array that will be
+ played for the effect-id. The bit fields of each byte are:
+		  [7]: drive direction, 0 - forward; 1 - reverse
+		  [6]: overdrive, 0 - 1x drive; 1 - 2x drive
+		  [5:1]: waveform amplitude
+		  [0]: reserved.
+
+- qcom,wf-vmax-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum allowed output voltage in millivolts
+ for this effect. Value specified here will be rounded
+ off to the closest multiple of 116 mV. Allowed values:
+ 0 to 3596. If this is not specified, the value defined in
+ "qcom,vmax-mv" will be applied.
+
+- qcom,wf-play-rate-us
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the play period in microseconds for each byte pattern.
+		  Allowed values are: 0 to 20475. For an LRA actuator, if
+		  "qcom,lra-allow-variable-play-rate" is defined, it may be
+		  set to values other than the resonance period of the
+		  LRA actuator.
+
+- qcom,wf-repeat-count
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the repeat times for the waveform pattern. Allowed
+ values are: 1, 2, 4, 8, 16, 32, 64, 128.
+
+- qcom,wf-s-repeat-count
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the repeat times for each sample defined in
+ qcom,wf-pattern. Allowed values are: 1, 2, 4, 8.
+
+- qcom,wf-brake-pattern
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Specifies the brake pattern with 4 elements used to enable the
+ internal reverse braking. Allowed values for each element are:
+ 0: no brake
+ 1: brake with (Vmax / 2) strength
+ 2: brake with Vmax strength
+ 3: brake with (2 * Vmax) strength
+ If this property is specified with an array of non-zero values,
+ then the brake pattern is applied at the end of the playing
+ waveform.
+
+- qcom,lra-auto-resonance-disable
+ Usage: optional
+ Value type: <empty>
+ Definition: If specified, the hardware feature of LRA auto resonance detection
+ is disabled.
+
+Example:
+ qcom,haptics@c000 {
+ compatible = "qcom,haptics";
+ reg = <0xc000 0x100>;
+ interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hap-sc-irq", "hap-play-irq";
+ qcom,actuator-type = "lra";
+ qcom,vmax-mv = <1800>;
+ qcom,ilim-ma = <400>;
+ qcom,play-rate-us = <8000>;
+ qcom,lra-resonance-sig-shape = "sine";
+ qcom,lra-auto-resonance-mode = "qwd";
+ qcom,lra-allow-variable-play-rate;
+
+ wf_0 {
+ /* CLICK effect */
+ qcom,effect-id = <0>;
+ qcom,wf-play-rate-us = <6250>;
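+			/*
+			 * Each 0x3e byte below decodes, per the qcom,wf-pattern
+			 * bit fields above, as: bit[7] = 0 (forward direction),
+			 * bit[6] = 0 (1x drive), bits[5:1] = 0x1f (amplitude),
+			 * bit[0] = 0 (reserved).
+			 */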
+ qcom,wf-pattern = [3e 3e 3e];
+ qcom,lra-auto-resonance-disable;
+ };
+
+ wf_5 {
+ /* HEAVY_CLICK effect */
+ qcom,effect-id = <5>;
+ qcom,wf-play-rate-us = <6250>;
+ qcom,wf-pattern = [7e 7e 7e];
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
new file mode 100644
index 0000000..e179f42
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
@@ -0,0 +1,72 @@
+Qualcomm Technologies, Inc. TRI_LED driver specific bindings
+
+This binding document describes the properties of TRI_LED module in
+Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,tri-led".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base of the TRI_LED module and length.
+
+- nvmem-names:
+ Usage: optional
+ Value type: <string>
+ Definition: Nvmem device name for SDAM to do PBS trigger. It must be
+ defined as "pbs_sdam". This is required only for HR_LEDs.
+
+- nvmem:
+ Usage: optional
+ Value type: <phandle>
+		Definition: Phandle of the nvmem device used to access SDAM for
+			the PBS trigger. This is required only for HR_LEDs.
+
+Properties for child nodes:
+- pwms:
+ Usage: required
+ Value type: <prop-encoded-array>
+		Definition: The PWM device (phandle) used for controlling the LED.
+
+- led-sources:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+ Device current output identifiers are: 0 - LED1_EN,
+ 1 - LED2_EN, 2 - LED3_EN.
+
+- label:
+ Usage: optional
+ Value type: <string>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+- linux,default-trigger:
+ Usage: optional
+ Value_type: <string>
+ Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+Example:
+
+	pmi8998_rgb: tri-led@d000 {
+ compatible = "qcom,tri-led";
+ reg = <0xd000 0x100>;
+
+ red {
+ label = "red";
+ pwms = <&pmi8998_lpg 4 1000000>;
+ led-sources = <0>;
+ };
+ green {
+ label = "green";
+ pwms = <&pmi8998_lpg 3 1000000>;
+ led-sources = <1>;
+ };
+ blue {
+ label = "blue";
+ pwms = <&pmi8998_lpg 2 1000000>;
+ led-sources = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-msm-msi.txt b/Documentation/devicetree/bindings/pci/pci-msm-msi.txt
new file mode 100644
index 0000000..446a067
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-msm-msi.txt
@@ -0,0 +1,74 @@
+* MSM PCIe MSI controller
+
+=========
+Main node
+=========
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+	Definition: Should be "qcom,pci-msi" to identify an MSM PCIe MSI controller
+
+- msi-controller:
+ Usage: required
+ Value type: <bool>
+	Definition: Indicates that this is an MSM PCIe MSI controller node
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+	Definition: Physical QGIC address used as the MSI message address
+		(for example, 0x17a00040)
+
+- interrupt-parent:
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle of the interrupt controller that services
+ interrupts for this device
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Array of tuples which describe interrupt lines for PCIe MSI
+
+=======
+Example
+=======
+
+pcie0_msi: qcom,pcie0_msi {
+ compatible = "qcom,pci-msi";
+ msi-controller;
+ reg = <0x17a00040 0x0 0x0 0x0 0xff>;
+ interrupt-parent = <&pdc>;
+ interrupts = <GIC_SPI 832 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 833 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 834 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 835 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 836 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 837 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 838 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 839 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 840 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 841 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 842 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 843 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 844 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 845 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 846 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 847 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 848 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 849 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 850 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 851 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 852 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 853 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 854 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 855 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 856 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 857 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 858 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 859 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 860 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 861 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 862 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 863 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/pci/pci-msm.txt b/Documentation/devicetree/bindings/pci/pci-msm.txt
new file mode 100644
index 0000000..e9d411a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-msm.txt
@@ -0,0 +1,486 @@
+* MSM PCI Express root complex
+
+=========
+Main node
+=========
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should be "qcom,pci-msm"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register ranges as listed in the reg-names property
+
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain:
+ - "parf" MSM specific registers
+ - "phy" PCIe PHY registers
+ - "dbi" DesignWare PCIe registers
+ - "elbi" External local bus interface registers
+ - "iatu" Internal translation unit registers
+ - "config" PCIe device configuration space
+ - "io" PCIe device I/O registers
+ - "bars" PCIe device base address registers
+ - "tcsr" (opt) PCIe clock scheme register
+ - "rumi" (opt) PCIe RUMI register
+
+- cell-index:
+ Usage: required
+ Value type: <u32>
+	Definition: Defines the root complex ID.
+
+- linux,pci-domain:
+ Usage: required
+ Value type: <u32>
+ Definition: As specified in pci.txt
+
+- #address-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Should be 3. As specified in designware-pcie.txt
+
+- #size-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Should be 2. As specified in designware-pcie.txt
+
+- ranges:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: As specified in designware-pcie.txt
+
+- interrupt-parent:
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle of the interrupt controller that services
+ interrupts for this device
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: PCIe root complex related interrupts
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain
+ - "int_msi"
+ - "int_a"
+ - "int_b"
+ - "int_c"
+			- "int_d"
+ - "int_global_int"
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Should be 1. As specified in designware-pcie.txt
+
+- interrupt-map-mask:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: As specified in designware-pcie.txt
+
+- interrupt-map:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: As specified in designware-pcie.txt
+
+- msi-parent:
+ Usage: required
+ Value type: <phandle>
+ Definition: As specified in pci-msi.txt
+
+- <name>-gpio:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: List of phandle and GPIO specifier pairs. Should contain:
+ - "perst-gpio" PCIe reset signal line
+ - "wake-gpio" PCIe wake signal line
+ - "qcom,ep-gpio" (opt) PCIe endpoint specific signal line
+
+- pinctrl-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Name of pin configuration groups. Should contain:
+ - "default"
+ - "sleep" (opt)
+
+- pinctrl-<num>:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: As specified in pinctrl-bindings.txt
+
+- <supply-name>-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle to PCIe core and PHY power supply. Should contain:
+ - "gdsc-vdd-supply" PCIe power domain control
+ - "vreg-1.8-supply" power supply for PCIe PHY
+ - "vreg-0.9-supply" power supply for PCIe PHY
+ - "vreg-cx-supply" power supply for PCIe core
+ - "vreg-3.3-supply" (opt) power supply for PCIe endpoint
+
+- qcom,<supply-name>-voltage-level:
+ Usage: required
+ Value type: <prop-encoded-array>
+	Definition: List of max/min voltage (uV) and optimal current (uA) tuples
+		for the power supply
+
+- qcom,msm-bus,<bus-field>:
+ Usage: required
+ Value type: <prop-encoded>
+ Definition: As specified in msm_bus.txt
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: List of phandle and clock specifier pairs as listed
+ in clock-names property
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: List of clock names that corresponds with listed "clocks"
+
+- max-clock-frequency-hz:
+ Usage: optional
+ Value type: <u32 array>
+	Definition: List of clock frequencies for each PCIe clock. Only the ones
+		that need to be changed have to be specified
+
+- resets:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: List of phandle and reset specifier pairs as listed
+ in reset-names property
+
+- reset-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain:
+ - "pcie_<num>_core_reset" Core reset
+ - "pcie_<num>_phy_reset" PHY reset
+
+- qcom,smmu-sid-base:
+ Usage: optional
+	Value type: <u32>
+ Definition: Base SID for PCIe
+
+- iommu-map:
+ Usage: optional. Required if qcom,smmu-sid-base is defined
+ Value type: <prop-encoded-array>
+ Definition: As defined in pci-iommu.txt. Should contain:
+ - <BDF, iommu phandle, SID, 0x1>
+
+- qcom,target-link-speed:
+ Usage: optional
+ Value type: <u32>
+ Definition: Override maximum GEN speed. Options:
+ - 0x1 GEN 1
+ - 0x2 GEN 2
+ - 0x3 GEN 3
+
+- qcom,link-check-max-count:
+	Usage: optional
+	Value type: <u32>
+	Definition: Max number of retries for link training. The delay between
+		each check is 5 ms
+
+- qcom,boot-option:
+ Usage: optional
+ Value type: <u32>
+ Definition: Controls PCIe bus driver boot sequence. Options:
+ - BIT(0) PCIe bus driver will not start enumeration
+ during its probe. Clients will control when
+ PCIe bus driver should do enumeration
+ - BIT(1) PCIe bus driver will not start enumeration if it
+ receives a WAKE interrupt
+
+- qcom,use-19p2mhz-aux-clk:
+ Usage: optional
+ Value type: <bool>
+ Definition: Set PCIe AUX clock frequency to 19.2MHz
+
+- qcom,common-clk-en:
+ Usage: optional
+ Value type: <bool>
+ Definition: Support common clock configuration
+
+- qcom,clk-power-manage-en:
+ Usage: optional
+ Value type: <bool>
+ Definition: Support clock power management
+
+- qcom,n-fts:
+ Usage: optional
+ Value type: <u32>
+ Definition: Number of fast training sequences sent when the link
+ transitions from L0s to L0
+
+- qcom,no-l0s-supported:
+ Usage: optional
+ Value type: <bool>
+ Definition: L0s is not supported
+
+- qcom,no-l1-supported:
+ Usage: optional
+ Value type: <bool>
+ Definition: L1 is not supported
+
+- qcom,no-l1ss-supported:
+ Usage: optional
+ Value type: <bool>
+ Definition: L1 sub-state (L1ss) is not supported
+
+- qcom,no-aux-clk-sync:
+ Usage: optional
+ Value type: <bool>
+ Definition: The AUX clock is not synchronous to the Core clock to
+ support L1ss
+
+- qcom,slv-addr-space-size:
+ Usage: required
+ Value type: <u32>
+ Definition: Memory block size dedicated to PCIe root complex
+
+- qcom,wr-halt-size:
+ Usage: optional
+ Value type: <u32>
+	Definition: Exponent (base 2) that determines the data size (bytes) that
+		the PCIe core will halt for each write
+
+- qcom,tlp-rd-size:
+ Usage: optional
+ Value type: <u32>
+	Definition: Determines the maximum read request size (bytes). Options:
+ - 0 128
+ - 1 256
+ - 2 512
+ - 3 1K
+ - 4 2K
+ - 5 4K
+
+- qcom,cpl-timeout:
+ Usage: optional
+ Value type: <u32>
+	Definition: Determines the timeout range after which the PCIe root complex
+		will send out a completion packet if no ACK is seen for a TLP.
+		Options:
+ - BIT(0) 50us to 10ms
+ - BIT(1) 10ms to 250ms
+ - BIT(2) 250ms to 4s
+ - BIT(3) 4s to 64s
+
+- qcom,perst-delay-us-min:
+ Usage: optional
+ Value type: <u32>
+	Definition: Minimum allowed time (us) to sleep after asserting or
+		de-asserting the PERST GPIO.
+
+- qcom,perst-delay-us-max:
+ Usage: optional
+ Value type: <u32>
+	Definition: Maximum allowed time (us) to sleep after asserting or
+		de-asserting the PERST GPIO
+
+- qcom,ep-latency:
+ Usage: optional
+ Value type: <u32>
+	Definition: The latency (ms) between when the PCIe PHY is up and PERST is
+		de-asserted. This guarantees the 100 MHz clock is available for
+		the PCIe devices
+
+- qcom,switch-latency:
+	Usage: optional
+	Value type: <u32>
+	Definition: The latency (ms) between when the PCIe link is up and before
+		any device behind the switch is accessed
+
+- qcom,pcie-phy-ver:
+ Usage: required
+ Value type: <u32>
+ Definition: States the PCIe PHY version
+
+- qcom,phy-status-offset:
+ Usage: required
+ Value type: <u32>
+	Definition: Offset from the PCIe PHY base at which to check the PCIe PHY
+		status
+
+- qcom,phy-power-down-offset:
+ Usage: required
+ Value type: <u32>
+ Definition: Offset from PCIe PHY base to control PHY power state
+
+- qcom,phy-sequence:
+ Usage: required
+ Value type: <prop-encoded array>
+ Definition: PCIe PHY initialization sequence
+
+
+==============
+Root port node
+==============
+
+Root ports are defined as subnodes of the PCIe controller node
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded array>
+	Definition: First cell is devfn, which is determined by PCI bus
+		topology. Assign the other cells 0 since they are not used
+
+- qcom,iommu-cfg:
+ Usage: optional
+ Value type: <u32>
+ Definition: Defines PCIe root port SMMU configuration. Options:
+ - BIT(0) Indicates if SMMU is present
+ - BIT(1) Set IOMMU attribute S1_BYPASS
+ - BIT(2) Set IOMMU attribute FAST
+ - BIT(3) Set IOMMU attribute ATOMIC
+ - BIT(4) Set IOMMU attribute FORCE COHERENT
+
+- qcom,iommu-range:
+ Usage: optional
+ Value type: Array of <u64>
+	Definition: Pair of values describing the IOVA base and size to allocate
+
+=======
+Example
+=======
+
+ pcie0: qcom,pcie@1c00000 {
+ compatible = "qcom,pci-msm";
+
+ reg = <0x1c00000 0x4000>,
+ <0x1c04000 0x1000>,
+ <0x60000000 0xf1d>,
+ <0x60000f20 0xa8>,
+ <0x60001000 0x1000>,
+ <0x60100000 0x100000>,
+ <0x60200000 0x100000>,
+ <0x60300000 0x3d00000>;
+ reg-names = "parf", "phy", "dm_core", "elbi", "iatu", "conf",
+ "io", "bars", "tcsr", "rumi";
+
+ cell-index = <0>;
+ device_type = "pci";
+ linux,pci-domain = <0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x60200000 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x60300000 0x0 0x3d00000>;
+
+ interrupt-parent = <&pcie0>;
+ interrupts = <0 1 2 3 4 5>;
+ interrupt-names = "int_msi", "int_a", "int_b", "int_c", "int_d",
+				"int_global_int";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0xffffffff>;
+ interrupt-map = <0 0 0 0 &intc 0 141 0
+ 0 0 0 1 &intc 0 149 0
+ 0 0 0 2 &intc 0 150 0
+ 0 0 0 3 &intc 0 151 0
+ 0 0 0 4 &intc 0 152 0
+ 0 0 0 5 &intc 0 140 0>;
+ msi-parent = <&pcie0_msi>;
+
+ perst-gpio = <&tlmm 35 0>;
+ wake-gpio = <&tlmm 37 0>;
+ qcom,ep-gpio = <&tlmm 94 0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pcie0_clkreq_default
+ &pcie0_perst_default
+ &pcie0_wake_default>;
+ pinctrl-1 = <&pcie0_clkreq_sleep
+ &pcie0_perst_sleep
+ &pcie0_wake_sleep>;
+
+ gdsc-vdd-supply = <&pcie_0_gdsc>;
+ vreg-1.8-supply = <&pm8150l_l3>;
+ vreg-0.9-supply = <&pm8150_l5>;
+ vreg-cx-supply = <&VDD_CX_LEVEL>;
+ vreg-3.3-supply = <&pm8150_l1>;
+ qcom,vreg-1.8-voltage-level = <1800000 1800000 1000>;
+ qcom,vreg-0.9-voltage-level = <950000 950000 24000>;
+ qcom,vreg-cx-voltage-level = <RPMH_REGULATOR_LEVEL_MAX
+ RPMH_REGULATOR_LEVEL_NOM 0>;
+
+ qcom,msm-bus,name = "pcie0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <45 512 0 0>,
+ <45 512 500 800>;
+
+ clocks = <&clock_gcc GCC_PCIE_0_PIPE_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_PCIE_0_AUX_CLK>,
+ <&clock_gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&clock_gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&clock_gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+ <&clock_gcc GCC_PCIE0_PHY_REFGEN_CLK>,
+ <&clock_gcc GCC_PCIE_PHY_AUX_CLK>;
+ clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+ "pcie_0_ldo", "pcie_0_slv_q2a_axi_clk",
+ "pcie_tbu_clk", "pcie_phy_refgen_clk",
+ "pcie_phy_aux_clk";
+ max-clock-frequency-hz = <0>, <0>, <19200000>, <0>, <0>, <0>,
+ <0>, <0>, <0>, <0>, <100000000>, <0>;
+
+ resets = <&clock_gcc GCC_PCIE_0_BCR>,
+ <&clock_gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "pcie_0_core_reset",
+ "pcie_0_phy_reset";
+
+ qcom,smmu-sid-base = <0x1e00>;
+ iommu-map = <0x0 &apps_smmu 0x1e00 0x1>,
+ <0x100 &apps_smmu 0x1e01 0x1>;
+
+ qcom,target-link-speed = <0x2>;
+		qcom,link-check-max-count = <40>; /* 200ms */
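+		/* BIT(0): clients trigger enumeration instead of the bus driver */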
+ qcom,boot-option = <0x1>;
+ qcom,use-19p2mhz-aux-clk;
+ qcom,common-clk-en;
+ qcom,clk-power-manage-en;
+ qcom,n-fts = <0x50>;
+ qcom,no-l0s-supported;
+ qcom,no-l1-supported;
+ qcom,no-l1ss-supported;
+ qcom,no-aux-clk-sync;
+ qcom,slv-addr-space-size = <0x1000000>; /* 16MB */
+ qcom,wr-halt-size = <0xa>; /* 1KB */
+ qcom,tlp-rd-size = <0x5>; /* 4KB */
+ qcom,cpl-timeout = <0x2>; /* 10ms to 250ms */
+ qcom,perst-delay-us-min = <10>;
+ qcom,perst-delay-us-max = <15>;
+ qcom,ep-latency = <20>;
+ qcom,switch-latency = <25>;
+
+ qcom,pcie-phy-ver = <0x2101>; /* v2 version 1.01 */
+ qcom,phy-status-offset = <0x814>;
+ qcom,phy-power-down-offset = <0x840>;
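+		/*
+		 * Illustrative PHY init sequence; each entry below is assumed
+		 * to be an <offset value delay> triplet applied to the PHY.
+		 */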
+ qcom,phy-sequence = <0x0840 0x03 0x0
+ 0x0094 0x08 0x0
+ 0x0154 0x34 0x0
+ 0x016c 0x08 0x0
+ 0x0058 0x0f 0x0
+ 0x00a4 0x42 0x0
+ 0x0110 0x24 0x0
+ 0x0800 0x00 0x0
+ 0x0844 0x03 0x0>;
+
+ pcie0_rp: pcie0_rp {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+			qcom,iommu-cfg = <0x3>; /* SMMU PRESENT, SET S1 BYPASS */
+ qcom,iommu-range = <0x0 0x10000000 0x0 0x40000000>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
new file mode 100644
index 0000000..2515f05
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -0,0 +1,423 @@
+Qualcomm Technologies, Inc. SMB5 Charger Specific Bindings
+
+The SMB5 charger is an efficient programmable battery charger capable of
+fast-charging a high-capacity lithium-ion battery over micro-USB or USB Type-C,
+with Quick Charge 2.0, Quick Charge 3.0, and USB Power Delivery support.
+charging features full A4WP Rezence 1.2, WPC 1.2, and PMA support.
+
+=======================
+Required Node Structure
+=======================
+
+The SMB5 charger must be described in two levels of device nodes.
+
+===============================
+First Level Node - SMB5 Charger
+===============================
+
+Charger specific properties:
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,qpnp-smb5".
+
+- qcom,pmic-revid
+ Usage: required
+ Value type: phandle
+ Definition: Should specify the phandle of PMI's revid module. This is used to
+ identify the PMI subtype.
+
+- qcom,sec-charger-config
+ Usage: optional
+ Value type: <u32>
+ Definition: Specify how the secondary chargers are configured.
+ 0 - No secondary charger.
+ 1 - Charge Pump SMB1390.
+ 2 - SMB1355 parallel charger.
+ 3 - Both Charge Pump and SMB1355.
+ If the value is not present, 0 is used as default.
+
+- io-channels
+- io-channel-names
+ Usage: optional
+ Value type: <phandle>
+ Definition: For details about IIO bindings see:
+ Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- qcom,batteryless-platform
+ Usage: optional
+ Value type: <empty>
+ Definition: Boolean flag which indicates that the platform does not have a
+ battery, and therefore charging should be disabled. In
+ addition battery properties will be faked such that the device
+ assumes normal operation.
+
+- qcom,charger-temp-max
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the charger temp REG_H_THRESHOLD for PM8150B in deciDegC.
+ If the value is not present, use the setting read from the device.
+
+- qcom,smb-temp-max
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the charger temp REG_H_THRESHOLD for SMB1355 in deciDegC.
+ If the value is not present, use the setting read from the device.
+
+- qcom,fcc-max-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum fast charge current in micro-amps.
+ If the value is not present, 1Amp is used as default.
+
+- qcom,fv-max-uv
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the maximum float voltage in micro-volts.
+ If the value is not present, 4.35V is used as default.
+
+- qcom,usb-icl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the USB input current limit in micro-amps.
+ If the value is not present, 1.5Amps is used as default.
+
+- qcom,usb-ocl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the OTG output current limit in micro-amps.
+ If the value is not present, 1.5Amps is used as default.
+
+- qcom,dc-icl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the DC input current limit in micro-amps.
+
+- qcom,boost-threshold-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the boost current threshold in micro-amps.
+ If the value is not present, 100mA is used as default.
+
+- qcom,thermal-mitigation
+ Usage: optional
+ Value type: Array of <u32>
+ Definition: Array of fast charge current limit values for
+ different system thermal mitigation levels.
+ This should be a flat array that denotes the
+ maximum charge current in mA for each thermal
+ level.
+
+- qcom,float-option
+ Usage: optional
+ Value type: <u32>
+ Definition: Configures how the charger behaves when a float charger is
+ detected by APSD.
+ 1 - Treat as a DCP.
+		2 - Treat as an SDP.
+ 3 - Disable charging.
+ 4 - Suspend USB input.
+
+- qcom,hvdcp-disable
+ Usage: optional
+ Value type: <empty>
+ Definition: Specifies if hvdcp charging is to be enabled or not.
+ If this property is not specified hvdcp will be enabled.
+ If this property is specified, hvdcp 2.0 detection will still
+ happen but the adapter won't be asked to switch to a higher
+ voltage point.
+
+- qcom,chg-inhibit-threshold-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Charge inhibit threshold in milli-volts. Charging will be
+ inhibited when the battery voltage is within this threshold
+ from Vfloat at charger insertion. If this is not specified
+ then charge inhibit will be disabled by default.
+ Allowed values are: 50, 100, 200, 300.
+
+- qcom,chg-term-src
+ Usage: optional
+ Value type: <u32>
+ Definition: Specify either the ADC or analog comparators to be used in order
+ to set threshold values for charge termination current.
+ 0 - Unspecified
+ 1 - Select ADC comparator
+ 2 - Select ANALOG comparator
+
+- qcom,chg-term-current-ma
+ Usage: optional
+ Value type: <u32>
+ Definition: When ADC comparator is selected as qcom,chg-term-src, this
+ parameter should be set to the desired upper threshold.
+
+- qcom,chg-term-base-current-ma
+ Usage: optional
+ Value type: <u32>
+ Definition: When ADC comparator is selected as qcom,chg-term-src, this
+ parameter should be set to the desired lower threshold.
+
+- qcom,auto-recharge-soc
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the SOC threshold at which the charger will
+ restart charging after termination. The value specified
+ ranges from 0 - 100. The feature is enabled if this
+ property is specified with a valid SOC value.
+
+- qcom,auto-recharge-vbat-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the battery voltage threshold at which the charger
+ will restart charging after termination. The value specified
+ is in milli-volts.
+
+- qcom,suspend-input-on-debug-batt
+ Usage: optional
+ Value type: <empty>
+ Definition: Boolean flag which when present enables input suspend for
+ debug battery.
+
+- qcom,min-freq-khz
+ Usage: optional
+ Value type: <u32>
+	Definition: Specifies the minimum charger buck/boost switching frequency
+		in kHz. It overrides the min frequency defined for the charger.
+
+- qcom,max-freq-khz
+ Usage: optional
+ Value type: <u32>
+	Definition: Specifies the maximum charger buck/boost switching frequency in
+		kHz. It overrides the max frequency defined for the charger.
+
+- qcom,otg-deglitch-time-ms
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the deglitch interval for OTG detection.
+ If the value is not present, 50 msec is used as default.
+
+- qcom,step-charging-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables step-charging.
+
+- qcom,wd-bark-time-secs
+ Usage: optional
+ Value type: <u32>
+ Definition: WD bark-timeout in seconds. The possible values are
+ 16, 32, 64, 128. If not defined it defaults to 64.
+
+- qcom,sw-jeita-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables sw compensation for
+ jeita.
+
+- qcom,battery-data
+ Usage: optional
+ Value type: <phandle>
+ Definition: Specifies the phandle of the node which contains the battery
+ profiles supported on the device.
+
+- qcom,flash-derating-soc
+ Usage: optional
+ Value type: <u32>
+ Definition: SOC threshold in percentage below which hardware will start
+ derating flash. This is only applicable to certain PMICs like
+ PMI632 which has SCHGM_FLASH peripheral.
+
+- qcom,flash-disable-soc
+ Usage: optional
+ Value type: <u32>
+ Definition: SOC threshold in percentage below which hardware will disable
+ flash. This is only applicable to certain PMICs like PMI632
+ which has SCHGM_FLASH peripheral.
+
+- qcom,headroom-mode
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies flash hardware headroom management policy. The
+ possible values are:
+ <0>: Fixed mode, constant 5V at flash input.
+ <1>: Adaptive mode allows charger output voltage to be
+ dynamically controlled by the flash module based on the
+ required flash headroom.
+ This is only applicable to certain PMICs like PMI632 which
+ has SCHGM_FLASH peripheral.
+
+- qcom,fcc-stepping-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables stepwise change in FCC.
+ The default stepping rate is 100mA/sec.
+
+=============================================
+Second Level Nodes - SMB5 Charger Peripherals
+=============================================
+
+Peripheral specific properties:
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Address and size of the peripheral's register block.
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: Interrupt names. This list must match up 1-to-1 with the
+ interrupts specified in the 'interrupts' property.
+
+=======
+Example
+=======
+
+pm8150b_charger: qcom,qpnp-smb5 {
+ compatible = "qcom,qpnp-smb5";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,pmic-revid = <&pm8150b_revid>;
+
+ dpdm-supply = <&qusb_phy0>;
+
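+	/* 1 - secondary charger is the SMB1390 charge pump */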
+ qcom,sec-charger-config = <1>;
+
+ io-channels = <&pm8150b_vadc ADC_USB_IN_V_16>,
+ <&pm8150b_vadc ADC_USB_IN_I>,
+ <&pm8150b_vadc ADC_CHG_TEMP>;
+ io-channel-names = "usb_in_voltage",
+ "usb_in_current",
+ "chg_temp";
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x2 0x10 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x10 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x10 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x10 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x10 0x4 IRQ_TYPE_NONE>;
+
+ interrupt-names = "chg-error",
+ "chg-state-change",
+ "step-chg-state-change",
+ "step-chg-soc-update-fail",
+ "step-chg-soc-update-request";
+ };
+
+ qcom,otg@1100 {
+ reg = <0x1100 0x100>;
+ interrupts = <0x2 0x11 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x11 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x11 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x11 0x3 IRQ_TYPE_NONE>;
+
+ interrupt-names = "otg-fail",
+ "otg-overcurrent",
+ "otg-oc-dis-sw-sts",
+ "testmode-change-detect";
+ };
+
+ qcom,bat-if@1200 {
+ reg = <0x1200 0x100>;
+ interrupts = <0x2 0x12 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x12 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x12 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x12 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x12 0x4 IRQ_TYPE_NONE>,
+ <0x2 0x12 0x5 IRQ_TYPE_NONE>;
+
+ interrupt-names = "bat-temp",
+ "bat-ocp",
+ "bat-ov",
+ "bat-low",
+ "bat-therm-or-id-missing",
+ "bat-terminal-missing";
+ };
+
+ qcom,usb-chgpth@1300 {
+ reg = <0x1300 0x100>;
+ interrupts = <0x2 0x13 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x4 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x5 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x6 IRQ_TYPE_NONE>,
+ <0x2 0x13 0x7 IRQ_TYPE_NONE>;
+
+ interrupt-names = "usbin-collapse",
+ "usbin-lt-3p6v",
+ "usbin-uv",
+ "usbin-ov",
+ "usbin-plugin",
+ "usbin-src-change",
+ "usbin-icl-change",
+ "type-c-change";
+ };
+
+ qcom,dc-chgpth@1400 {
+ reg = <0x1400 0x100>;
+ interrupts = <0x2 0x14 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x4 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x5 IRQ_TYPE_NONE>,
+ <0x2 0x14 0x6 IRQ_TYPE_NONE>;
+
+ interrupt-names = "dcin-collapse",
+ "dcin-lt-3p6v",
+ "dcin-uv",
+ "dcin-ov",
+ "dcin-plugin",
+ "div2-en-dg",
+ "dcin-icl-change";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x2 0x16 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x4 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x5 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x6 IRQ_TYPE_NONE>,
+ <0x2 0x16 0x7 IRQ_TYPE_NONE>;
+
+ interrupt-names = "wdog-snarl",
+ "wdog-bark",
+ "aicl-fail",
+ "aicl-done",
+ "high-duty-cycle",
+ "input-current-limiting",
+ "temperature-change",
+ "switcher-power-ok";
+ };
+
+ qcom,schgm-flash@a600 {
+ reg = <0xa600 0x100>;
+ interrupts = <0x2 0xa6 0x0 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x1 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x2 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x3 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x4 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x5 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x6 IRQ_TYPE_NONE>,
+ <0x2 0xa6 0x7 IRQ_TYPE_NONE>;
+
+ interrupt-names = "flash-en",
+ "torch-req",
+ "flash-state-change",
+ "vout-up",
+ "vout-down",
+ "ilim1-s1",
+ "ilim2-s2",
+ "vreg-ok";
+ };
+};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
new file mode 100644
index 0000000..d01f25a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
@@ -0,0 +1,159 @@
+Qualcomm Technologies, Inc. LPG driver specific bindings
+
+This binding document describes the properties of LPG (Light Pulse Generator)
+device module in Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,pwm-lpg".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base and length for LPG and LUT modules. LPG size
+ or length available per channel varies depending on the
+ number of channels in PMIC.
+
+- reg-names:
+ Usage: required
+ Value type: <string>
+	Definition: The names of the register regions defined in the reg property.
+		"lpg-base" is required; "lut-base" is optional but required
+		if any LPG channels support LUT mode.
+
+- #pwm-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: The number of cells in "pwms" property specified in
+		PWM user nodes. It should be 2. The first cell is
+		the PWM channel ID indexed from 0, and the second
+		cell is the PWM default period in nanoseconds (a
+		consumer sketch follows the example at the end of
+		this document).
+
+- qcom,lut-patterns:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Duty ratios in percentages for LPG working at LUT mode.
+ These duty ratios will be translated into PWM values
+		and stored in the LUT module. The LUT module can store
+		at most 47 PWM values, which are shared by all LPG
+		channels. This property is required if any LPG channels
+		support LUT mode.
+
+- qcom,sync-channel-ids:
+ Usage: optional
+ Value type: <prop-encoded-array>
+	Definition: The hardware IDs of the LPG channels that are required to
+		be grouped together. These channels will share the same LUT
+ ramping configuration so that they will be enabled with a
+ synchronized pattern. If the LUT ramping configuration
+ differs for the channels grouped for synchronization,
+ configuration of the first channel will be applied for
+ all others.
+
+Subnodes are optional if LUT mode is not required; they are required if any LPG
+channels are expected to support LUT mode.
+
+Subnode properties:
+Subnodes for each LPG channel (lpg@X) can be defined if any of the following
+parameters needs to be configured for that channel.
+
+- qcom,lpg-chan-id:
+ Usage: required
+ Value type: <u32>
+ Definition: The LPG channel's hardware ID indexed from 1. Allowed
+ range is 1 - 8. Maximum value depends on the number of
+ channels supported on PMIC.
+
+- qcom,ramp-step-ms:
+ Usage: required
+ Value type: <u32>
+ Definition: The step duration in milliseconds for LPG staying at each
+ duty specified in the LUT pattern. Allowed range is
+ 1 - 511.
+
+- qcom,ramp-high-index:
+ Usage: required
+ Value type: <u32>
+	Definition: The high index of the LUT pattern up to which the LPG
+		ramps. Allowed range is 1 - 47.
+
+- qcom,ramp-low-index:
+ Usage: required
+ Value type: <u32>
+	Definition: The low index of the LUT pattern from which the LPG begins
+		ramping. Allowed range is 0 - 46.
+
+- qcom,ramp-from-low-to-high:
+ Usage: optional
+ Value type: <empty>
+ Definition: The flag to specify the LPG ramping direction. The ramping
+ direction is from low index to high index of the LUT
+ pattern if it's specified.
+
+- qcom,ramp-pattern-repeat:
+ Usage: optional
+ Value type: <empty>
+ Definition: The flag to specify if LPG would be ramping with the LUT
+ pattern repeatedly.
+
+- qcom,ramp-toggle:
+ Usage: optional
+ Value type: <empty>
+	Definition: The flag to specify if the LPG should toggle the LUT pattern
+		while ramping. If toggling is enabled, the LPG returns to the
+		low index when the high index is reached, or to the high
+		index when the low index is reached.
+
+- qcom,ramp-pause-hi-count:
+ Usage: optional
+ Value type: <u32>
+	Definition: The number of steps for which the LPG pauses the output
+		when it has ramped up to the high index of the LUT.
+
+- qcom,ramp-pause-lo-count:
+	Usage: optional
+	Value type: <u32>
+	Definition: The number of steps for which the LPG pauses the output
+		when it has ramped to the low index of the LUT.
+
+Example:
+
+ pmi8998_lpg: lpg@b100 {
+ compatible = "qcom,pwm-lpg";
+ reg = <0xb100 0x600>, <0xb000 0x100>;
+ reg-names = "lpg-base", "lut-base";
+ #pwm-cells = <2>;
+ qcom,lut-patterns = <0 14 28 42 56 70 84 100
+ 100 84 70 56 42 28 14 0>;
+ qcom,sync-channel-ids = <3 4 5>;
+ lpg@3 {
+ qcom,lpg-chan-id = <3>;
+ qcom,ramp-step-ms = <200>;
+ qcom,ramp-pause-hi-count = <10>;
+ qcom,ramp-pause-lo-count = <10>;
+ qcom,ramp-low-index = <0>;
+ qcom,ramp-high-index = <15>;
+ qcom,ramp-from-low-to-high;
+ qcom,ramp-pattern-repeat;
+ };
+ lpg@4 {
+ qcom,lpg-chan-id = <4>;
+ qcom,ramp-step-ms = <200>;
+ qcom,ramp-pause-hi-count = <10>;
+ qcom,ramp-pause-lo-count = <10>;
+ qcom,ramp-low-index = <0>;
+ qcom,ramp-high-index = <15>;
+ qcom,ramp-from-low-to-high;
+ qcom,ramp-pattern-repeat;
+ };
+ lpg@5 {
+ qcom,lpg-chan-id = <5>;
+ qcom,ramp-step-ms = <200>;
+ qcom,ramp-pause-hi-count = <10>;
+ qcom,ramp-pause-lo-count = <10>;
+ qcom,ramp-low-index = <0>;
+ qcom,ramp-high-index = <15>;
+ qcom,ramp-from-low-to-high;
+ qcom,ramp-pattern-repeat;
+ };
+ };
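+
+As a minimal sketch of the two-cell "pwms" specifier described under
+"#pwm-cells" above (the consumer node name and channel choice here are
+illustrative only; see the TRI_LED binding for a real user), a PWM user node
+would look like:
+
+	vibrator {
+		/* LPG channel index 1, 1 ms (1000000 ns) default period */
+		pwms = <&pmi8998_lpg 1 1000000>;
+	};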
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 1398a16..3999867 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -1286,6 +1286,35 @@
mbox-names = "cdsp-pil";
};
+
+ qcom,venus@aab0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xaab0000 0x2000>;
+/*
+ * vdd-supply = <&mvsc_gdsc>;
+ * qcom,proxy-reg-names = "vdd";
+ * qcom,complete-ramdump;
+ *
+ * clocks = <&clock_videocc VIDEO_CC_XO_CLK>,
+ * <&clock_videocc VIDEO_CC_MVSC_CORE_CLK>,
+ * <&clock_videocc VIDEO_CC_IRIS_AHB_CLK>;
+ * clock-names = "xo", "core", "ahb";
+ * qcom,proxy-clock-names = "xo", "core", "ahb";
+ */
+ qcom,core-freq = <200000000>;
+ qcom,ahb-freq = <200000000>;
+
+ qcom,pas-id = <9>;
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+ qcom,proxy-timeout-ms = <100>;
+ qcom,firmware-name = "venus";
+ memory-region = <&pil_video_mem>;
+ };
};
#include "kona-bus.dtsi"
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..483e180 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@ static struct msm_edp *edp_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- DBG("eDP probed=%p", edp);
+ DBG("eDP probed=%pK", edp);
edp->pdev = pdev;
platform_set_drvdata(pdev, edp);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5b0b9ff..50adc97 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1179,7 +1179,7 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
return vblank_ctrl_queue_work(priv, pipe, true);
}
@@ -1189,7 +1189,7 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
vblank_ctrl_queue_work(priv, pipe, false);
}
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 45cd190..e948b03 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -321,7 +321,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
unsigned int hsub, vsub;
bool is_modified = false;
- DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@@ -405,7 +405,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+ DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b..6923ec2 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -117,7 +117,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
goto fail_unlock;
}
- DBG("fbi=%p, dev=%p", fbi, dev);
+ DBG("fbi=%pK, dev=%pK", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
@@ -141,7 +141,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = bo->size;
- DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f6a8ea2..855caaa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -282,7 +282,7 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
+ VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
@@ -921,7 +921,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5e808cf..442ffd0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -878,18 +878,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
- DBG("ebi1_clk: %p", gpu->ebi1_clk);
+ DBG("ebi1_clk: %pK", gpu->ebi1_clk);
if (IS_ERR(gpu->ebi1_clk))
gpu->ebi1_clk = NULL;
/* Acquire regulators: */
gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
- DBG("gpu_reg: %p", gpu->gpu_reg);
+ DBG("gpu_reg: %pK", gpu->gpu_reg);
if (IS_ERR(gpu->gpu_reg))
gpu->gpu_reg = NULL;
gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
- DBG("gpu_cx: %p", gpu->gpu_cx);
+ DBG("gpu_cx: %pK", gpu->gpu_cx);
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 479cba7..27be197 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -49,6 +49,8 @@ static void dspp_sixzone_install_property(struct drm_crtc *crtc);
static void dspp_ad_install_property(struct drm_crtc *crtc);
+static void dspp_ltm_install_property(struct drm_crtc *crtc);
+
static void dspp_vlut_install_property(struct drm_crtc *crtc);
static void dspp_gamut_install_property(struct drm_crtc *crtc);
@@ -85,6 +87,7 @@ do { \
func[SDE_DSPP_MEMCOLOR] = dspp_memcolor_install_property; \
func[SDE_DSPP_SIXZONE] = dspp_sixzone_install_property; \
func[SDE_DSPP_AD] = dspp_ad_install_property; \
+ func[SDE_DSPP_LTM] = dspp_ltm_install_property; \
func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
func[SDE_DSPP_GC] = dspp_gc_install_property; \
@@ -128,6 +131,14 @@ enum {
SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
SDE_CP_CRTC_DSPP_AD_STRENGTH,
SDE_CP_CRTC_DSPP_AD_ROI,
+ SDE_CP_CRTC_DSPP_LTM,
+ SDE_CP_CRTC_DSPP_LTM_INIT,
+ SDE_CP_CRTC_DSPP_LTM_ROI,
+ SDE_CP_CRTC_DSPP_LTM_HIST_CTL,
+ SDE_CP_CRTC_DSPP_LTM_HIST_THRESH,
+ SDE_CP_CRTC_DSPP_LTM_SET_BUF,
+ SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF,
+ SDE_CP_CRTC_DSPP_LTM_VLUT,
SDE_CP_CRTC_DSPP_MAX,
/* DSPP features end */
@@ -1438,6 +1449,64 @@ static void dspp_ad_install_property(struct drm_crtc *crtc)
}
}
+static void dspp_ltm_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
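+	/* LTM version property packs major in bits 31:16, minor in bits 15:0 */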
+ version = catalog->dspp[0].sblk->ltm.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_LTM_V", version);
+ switch (version) {
+ case 1:
+ sde_cp_crtc_install_immutable_property(crtc,
+ feature_name, SDE_CP_CRTC_DSPP_LTM);
+
+ sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_LTM_INIT_V1",
+ SDE_CP_CRTC_DSPP_LTM_INIT, 0, U64_MAX, 0);
+ sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_INIT,
+ sizeof(struct drm_msm_ltm_init_param));
+
+ sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_LTM_ROI_V1",
+ SDE_CP_CRTC_DSPP_LTM_ROI, 0, U64_MAX, 0);
+ sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_ROI,
+ sizeof(struct drm_msm_ltm_cfg_param));
+
+ sde_cp_crtc_install_enum_property(crtc,
+ SDE_CP_CRTC_DSPP_LTM_HIST_CTL, sde_ltm_hist_modes,
+ ARRAY_SIZE(sde_ltm_hist_modes),
+ "SDE_DSPP_LTM_HIST_CTRL_V1");
+
+ sde_cp_crtc_install_range_property(crtc,
+ "SDE_DSPP_LTM_HIST_THRESH_V1",
+ SDE_CP_CRTC_DSPP_LTM_HIST_THRESH, 0, (BIT(10) - 1), 0);
+
+ sde_cp_crtc_install_range_property(crtc,
+ "SDE_DSPP_LTM_SET_BUF_V1",
+ SDE_CP_CRTC_DSPP_LTM_SET_BUF, 0, U64_MAX, 0);
+ sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_SET_BUF,
+ sizeof(struct drm_msm_ltm_buffers_ctrl));
+
+ sde_cp_crtc_install_range_property(crtc,
+ "SDE_DSPP_LTM_QUEUE_BUF_V1",
+ SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF, 0, U64_MAX, 0);
+
+ sde_cp_crtc_install_range_property(crtc,
+ "SDE_DSPP_LTM_VLUT_V1",
+ SDE_CP_CRTC_DSPP_LTM_VLUT, 0, U64_MAX, 0);
+ sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_LTM_VLUT,
+ sizeof(struct drm_msm_ltm_data));
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
static void lm_gc_install_property(struct drm_crtc *crtc)
{
char feature_name[256];
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 88aebe8..d1bda9a 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -41,6 +41,25 @@ static const struct drm_prop_enum_list sde_hist_modes[] = {
{HIST_ENABLED, "hist_on"},
};
+/**
+ * enum ltm_hist_modes - LTM histogram modes
+ * @LTM_HIST_DISABLED: Histogram disabled
+ * @LTM_HIST_ENABLED: Histogram enabled
+ */
+enum ltm_hist_modes {
+ LTM_HIST_DISABLED,
+ LTM_HIST_ENABLED
+};
+
+/**
+ * struct drm_prop_enum_list - drm structure for creating enum property and
+ * enumerating values
+ */
+static const struct drm_prop_enum_list sde_ltm_hist_modes[] = {
+ {LTM_HIST_DISABLED, "ltm_hist_off"},
+ {LTM_HIST_ENABLED, "ltm_hist_on"},
+};
+
/**
* sde_cp_crtc_init(): Initialize color processing lists for a crtc.
* Should be called during crtc initialization.
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 1630b09..54a9fe7 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -115,16 +115,17 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
- ret = sde_kms->hw_intr->ops.enable_irq(
- sde_kms->hw_intr,
- irq_idx);
- if (ret)
- SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
- irq_idx);
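+	/*
+	 * Take hw_intr->irq_lock across the refcount update and the HW
+	 * enable so enable and disable of the same irq_idx cannot race.
+	 */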
+ spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
+ if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1)
+ ret = sde_kms->hw_intr->ops.enable_irq_nolock(
+ sde_kms->hw_intr, irq_idx);
+ spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
+ if (ret)
+ SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n", irq_idx);
- SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ if (atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is enabled */
if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
@@ -132,26 +133,18 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
irq_idx);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
-
return ret;
}
int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i, ret = 0, counts;
+ int i, ret = 0;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
- counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
- if (counts) {
- SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
- __builtin_return_address(0), irq_idxs[0], counts);
- SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
- }
-
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
@@ -166,6 +159,7 @@ int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
{
int ret = 0;
+ unsigned long irq_flags;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
SDE_ERROR("invalid params\n");
@@ -182,35 +176,30 @@ static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
- ret = sde_kms->hw_intr->ops.disable_irq(
- sde_kms->hw_intr,
- irq_idx);
- if (ret)
- SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
+
+ spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
+ if (atomic_add_unless(&sde_kms->irq_obj.enable_counts[irq_idx], -1, 0)
+ && atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0)
+ ret = sde_kms->hw_intr->ops.disable_irq_nolock(
+ sde_kms->hw_intr, irq_idx);
+ spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
+
+ if (ret)
+ SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n", irq_idx);
+ SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
return ret;
}
int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i, ret = 0, counts;
+ int i, ret = 0;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
- counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
- if (counts == 2) {
- SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
- __builtin_return_address(0), irq_idxs[0], counts);
- SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
- }
-
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 66be921..e103214 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -321,6 +321,12 @@ enum {
};
enum {
+ LTM_OFF,
+ LTM_VERSION,
+ LTM_PROP_MAX,
+};
+
+enum {
MIXER_OFF,
MIXER_LEN,
MIXER_PAIR_MASK,
@@ -604,6 +610,11 @@ static struct sde_prop_type ad_prop[] = {
{AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
};
+static struct sde_prop_type ltm_prop[] = {
+ {LTM_OFF, "qcom,sde-dspp-ltm-off", false, PROP_TYPE_U32_ARRAY},
+ {LTM_VERSION, "qcom,sde-dspp-ltm-version", false, PROP_TYPE_U32},
+};
+
static struct sde_prop_type ds_top_prop[] = {
{DS_TOP_OFF, "qcom,sde-dest-scaler-top-off", false, PROP_TYPE_U32},
{DS_TOP_LEN, "qcom,sde-dest-scaler-top-size", false, PROP_TYPE_U32},
@@ -2229,12 +2240,14 @@ static int sde_dspp_parse_dt(struct device_node *np,
{
int rc, prop_count[DSPP_PROP_MAX], i;
int ad_prop_count[AD_PROP_MAX];
+ int ltm_prop_count[LTM_PROP_MAX];
bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
+ bool ltm_prop_exists[LTM_PROP_MAX];
bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
- struct sde_prop_value *ad_prop_value = NULL;
+ struct sde_prop_value *ad_prop_value = NULL, *ltm_prop_value = NULL;
int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
- u32 off_count, ad_off_count;
+ u32 off_count, ad_off_count, ltm_off_count;
struct sde_dspp_cfg *dspp;
struct sde_dspp_sub_blks *sblk;
struct device_node *snp = NULL;
@@ -2280,6 +2293,22 @@ static int sde_dspp_parse_dt(struct device_node *np,
if (rc)
goto end;
+ /* Parse LTM dtsi entries */
+ ltm_prop_value = kcalloc(LTM_PROP_MAX,
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!ltm_prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+ rc = _validate_dt_entry(np, ltm_prop, ARRAY_SIZE(ltm_prop),
+ ltm_prop_count, <m_off_count);
+ if (rc)
+ goto end;
+ rc = _read_dt_entry(np, ltm_prop, ARRAY_SIZE(ltm_prop), ltm_prop_count,
+ ltm_prop_exists, ltm_prop_value);
+ if (rc)
+ goto end;
+
/* get DSPP feature dt properties if they exist */
snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
if (snp) {
@@ -2331,11 +2360,24 @@ static int sde_dspp_parse_dt(struct device_node *np,
AD_VERSION, 0);
set_bit(SDE_DSPP_AD, &dspp->features);
}
+
+ sblk->ltm.id = SDE_DSPP_LTM;
+ sde_cfg->ltm_count = ltm_off_count;
+ if (ltm_prop_value && (i < ltm_off_count) &&
+ ltm_prop_exists[LTM_OFF]) {
+ sblk->ltm.base = PROP_VALUE_ACCESS(ltm_prop_value,
+ LTM_OFF, i);
+ sblk->ltm.version = PROP_VALUE_ACCESS(ltm_prop_value,
+ LTM_VERSION, 0);
+ set_bit(SDE_DSPP_LTM, &dspp->features);
+ }
+
}
end:
kfree(prop_value);
kfree(ad_prop_value);
+ kfree(ltm_prop_value);
kfree(blocks_prop_value);
return rc;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index a9bf35f..5107283 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -248,6 +248,7 @@ enum {
* @SDE_DSPP_HIST Histogram block
* @SDE_DSPP_VLUT PA VLUT block
* @SDE_DSPP_AD AD block
+ * @SDE_DSPP_LTM LTM block
* @SDE_DSPP_MAX maximum value
*/
enum {
@@ -262,6 +263,7 @@ enum {
SDE_DSPP_HIST,
SDE_DSPP_VLUT,
SDE_DSPP_AD,
+ SDE_DSPP_LTM,
SDE_DSPP_MAX
};
@@ -584,6 +586,7 @@ struct sde_dspp_sub_blks {
struct sde_pp_blk dither;
struct sde_pp_blk hist;
struct sde_pp_blk ad;
+ struct sde_pp_blk ltm;
struct sde_pp_blk vlut;
};
@@ -1181,6 +1184,7 @@ struct sde_mdss_cfg {
struct sde_reg_dma_cfg dma_cfg;
u32 ad_count;
+ u32 ltm_count;
u32 merge_3d_count;
struct sde_merge_3d_cfg merge_3d[MAX_BLOCKS];
@@ -1220,6 +1224,7 @@ struct sde_mdss_hw_cfg_handler {
#define BLK_INTF(s) ((s)->intf)
#define BLK_WB(s) ((s)->wb)
#define BLK_AD(s) ((s)->ad)
+#define BLK_LTM(s) ((s)->ltm)
/**
* sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index f12bda0..d1ae360 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -660,10 +660,9 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
-static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
int reg_idx;
- unsigned long irq_flags;
const struct sde_intr_reg *reg;
const struct sde_irq_type *irq;
const char *dbgstr = NULL;
@@ -686,7 +685,6 @@ static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
reg = &intr->sde_irq_tbl[reg_idx];
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & irq->irq_mask) {
dbgstr = "SDE IRQ already set:";
@@ -704,7 +702,6 @@ static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
irq->irq_mask, cache_irq_mask);
@@ -761,25 +758,6 @@ static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
return 0;
}
-static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
-{
- unsigned long irq_flags;
-
- if (!intr)
- return -EINVAL;
-
- if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
- pr_err("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- sde_hw_intr_disable_irq_nolock(intr, irq_idx);
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-
- return 0;
-}
-
static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
{
int i;
@@ -1040,8 +1018,7 @@ static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
ops->set_mask = sde_hw_intr_set_mask;
ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
- ops->enable_irq = sde_hw_intr_enable_irq;
- ops->disable_irq = sde_hw_intr_disable_irq;
+ ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
ops->clear_all_irqs = sde_hw_intr_clear_irqs;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index b75eb4e..db7fa3f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -128,22 +128,12 @@ struct sde_hw_intr_ops {
u32 instance_idx);
/**
- * enable_irq - Enable IRQ based on lookup IRQ index
+ * enable_irq_nolock - Enable IRQ based on lookup IRQ index without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index return from irq_idx_lookup
* @return: 0 for success, otherwise failure
*/
- int (*enable_irq)(
- struct sde_hw_intr *intr,
- int irq_idx);
-
- /**
- * disable_irq - Disable IRQ based on lookup IRQ index
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @return: 0 for success, otherwise failure
- */
- int (*disable_irq)(
+ int (*enable_irq_nolock)(
struct sde_hw_intr *intr,
int irq_idx);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 9e0232c5..5339d34 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -164,6 +164,15 @@
To compile this driver as a module, choose M here: the
module will be called pmic8xxx-pwrkey.
+config INPUT_QTI_HAPTICS
+ tristate "Haptics support for QTI PMIC"
+ depends on MFD_SPMI_PMIC
+ help
+ This option enables device driver support for the haptics peripheral
+ found on Qualcomm Technologies, Inc. PMICs. The haptics peripheral
+ is capable of driving both LRA and ERM vibrators. This module provides
+ haptic feedback for user actions such as a long press on the touch screen.
+
config INPUT_SPARCSPKR
tristate "SPARC Speaker support"
depends on PCI && SPARC64
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 02e9edc..07cd0e8 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -57,6 +57,7 @@
obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o
obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o
obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
+obj-$(CONFIG_INPUT_QTI_HAPTICS) += qti-haptics.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o
diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c
new file mode 100644
index 0000000..3136e9e
--- /dev/null
+++ b/drivers/input/misc/qti-haptics.c
@@ -0,0 +1,2031 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+enum actutor_type {
+ ACT_LRA,
+ ACT_ERM,
+};
+
+enum lra_res_sig_shape {
+ RES_SIG_SINE,
+ RES_SIG_SQUARE,
+};
+
+enum lra_auto_res_mode {
+ AUTO_RES_MODE_ZXD,
+ AUTO_RES_MODE_QWD,
+};
+
+enum wf_src {
+ INT_WF_VMAX,
+ INT_WF_BUFFER,
+ EXT_WF_AUDIO,
+ EXT_WF_PWM,
+};
+
+enum haptics_custom_effect_param {
+ CUSTOM_DATA_EFFECT_IDX,
+ CUSTOM_DATA_TIMEOUT_SEC_IDX,
+ CUSTOM_DATA_TIMEOUT_MSEC_IDX,
+ CUSTOM_DATA_LEN,
+};
+
+/* common definitions */
+#define HAP_BRAKE_PATTERN_MAX 4
+#define HAP_WAVEFORM_BUFFER_MAX 8
+#define HAP_VMAX_MV_DEFAULT 1800
+#define HAP_VMAX_MV_MAX 3596
+#define HAP_ILIM_MA_DEFAULT 400
+#define HAP_ILIM_MA_MAX 800
+#define HAP_PLAY_RATE_US_DEFAULT 5715
+#define HAP_PLAY_RATE_US_MAX 20475
+#define HAP_PLAY_RATE_US_LSB 5
+#define VMAX_MIN_PLAY_TIME_US 20000
+#define HAP_SC_DET_MAX_COUNT 5
+#define HAP_SC_DET_TIME_US 1000000
+#define FF_EFFECT_COUNT_MAX 32
+#define HAP_DISABLE_DELAY_USEC 1000
+
+/* haptics module register definitions */
+#define REG_HAP_STATUS1 0x0A
+#define HAP_SC_DET_BIT BIT(3)
+#define HAP_BUSY_BIT BIT(1)
+
+#define REG_HAP_EN_CTL1 0x46
+#define HAP_EN_BIT BIT(7)
+
+#define REG_HAP_EN_CTL2 0x48
+#define HAP_AUTO_STANDBY_EN_BIT BIT(1)
+#define HAP_BRAKE_EN_BIT BIT(0)
+
+#define REG_HAP_EN_CTL3 0x4A
+#define HAP_HBRIDGE_EN_BIT BIT(7)
+#define HAP_PWM_SIGNAL_EN_BIT BIT(6)
+#define HAP_ILIM_EN_BIT BIT(5)
+#define HAP_ILIM_CC_EN_BIT BIT(4)
+#define HAP_AUTO_RES_RBIAS_EN_BIT BIT(3)
+#define HAP_DAC_EN_BIT BIT(2)
+#define HAP_ZX_HYST_EN_BIT BIT(1)
+#define HAP_PWM_CTL_EN_BIT BIT(0)
+
+#define REG_HAP_AUTO_RES_CTRL 0x4B
+#define HAP_AUTO_RES_EN_BIT BIT(7)
+#define HAP_SEL_AUTO_RES_PERIOD BIT(6)
+#define HAP_AUTO_RES_CNT_ERR_DELTA_MASK GENMASK(5, 4)
+#define HAP_AUTO_RES_CNT_ERR_DELTA_SHIFT 4
+#define HAP_AUTO_RES_ERR_RECOVERY_BIT BIT(3)
+#define HAP_AUTO_RES_EN_DLY_MASK GENMASK(2, 0)
+#define AUTO_RES_CNT_ERR_DELTA(x)	((x) << HAP_AUTO_RES_CNT_ERR_DELTA_SHIFT)
+#define AUTO_RES_EN_DLY(x)		(x)
+
+#define REG_HAP_CFG1 0x4C
+#define REG_HAP_CFG2 0x4D
+#define HAP_LRA_RES_TYPE_BIT BIT(0)
+
+#define REG_HAP_SEL 0x4E
+#define HAP_WF_SOURCE_MASK GENMASK(5, 4)
+#define HAP_WF_SOURCE_SHIFT 4
+#define HAP_WF_TRIGGER_BIT BIT(0)
+#define HAP_WF_SOURCE_VMAX (0 << HAP_WF_SOURCE_SHIFT)
+#define HAP_WF_SOURCE_BUFFER (1 << HAP_WF_SOURCE_SHIFT)
+#define HAP_WF_SOURCE_AUDIO (2 << HAP_WF_SOURCE_SHIFT)
+#define HAP_WF_SOURCE_PWM (3 << HAP_WF_SOURCE_SHIFT)
+
+#define REG_HAP_AUTO_RES_CFG 0x4F
+#define HAP_AUTO_RES_MODE_BIT BIT(7)
+#define HAP_AUTO_RES_MODE_SHIFT 7
+#define HAP_AUTO_RES_CAL_DURATON_MASK GENMASK(6, 5)
+#define HAP_CAL_EOP_EN_BIT BIT(3)
+#define HAP_CAL_PERIOD_MASK GENMASK(2, 0)
+#define HAP_CAL_OPT3_EVERY_8_PERIOD 2
+
+#define REG_HAP_SLEW_CFG 0x50
+#define REG_HAP_VMAX_CFG 0x51
+#define HAP_VMAX_SIGN_BIT BIT(7)
+#define HAP_VMAX_OVD_BIT BIT(6)
+#define HAP_VMAX_MV_MASK GENMASK(5, 1)
+#define HAP_VMAX_MV_SHIFT 1
+#define HAP_VMAX_MV_LSB 116
+
+#define REG_HAP_ILIM_CFG 0x52
+#define REG_HAP_SC_DEB_CFG 0x53
+#define REG_HAP_RATE_CFG1 0x54
+#define REG_HAP_RATE_CFG2 0x55
+#define REG_HAP_INTERNAL_PWM 0x56
+#define REG_HAP_EXTERNAL_PWM 0x57
+#define REG_HAP_PWM 0x58
+
+#define REG_HAP_SC_CLR 0x59
+#define HAP_SC_CLR_BIT BIT(0)
+
+#define REG_HAP_ZX_CFG 0x5A
+#define HAP_ZX_DET_DEB_MASK GENMASK(2, 0)
+#define ZX_DET_DEB_10US 0
+#define ZX_DET_DEB_20US 1
+#define ZX_DET_DEB_40US 2
+#define ZX_DET_DEB_80US 3
+
+#define REG_HAP_BRAKE 0x5C
+#define HAP_BRAKE_PATTERN_MASK 0x3
+#define HAP_BRAKE_PATTERN_SHIFT 2
+
+#define REG_HAP_WF_REPEAT 0x5E
+#define HAP_WF_REPEAT_MASK GENMASK(6, 4)
+#define HAP_WF_REPEAT_SHIFT 4
+#define HAP_WF_S_REPEAT_MASK GENMASK(1, 0)
+
+#define REG_HAP_WF_S1 0x60
+#define HAP_WF_SIGN_BIT BIT(7)
+#define HAP_WF_OVD_BIT BIT(6)
+#define HAP_WF_AMP_BIT GENMASK(5, 1)
+#define HAP_WF_AMP_SHIFT 1
+
+#define REG_HAP_PLAY 0x70
+#define HAP_PLAY_BIT BIT(7)
+
+#define REG_HAP_SEC_ACCESS 0xD0
+
+struct qti_hap_effect {
+ int id;
+ u8 *pattern;
+ int pattern_length;
+ u16 play_rate_us;
+ u16 vmax_mv;
+ u8 wf_repeat_n;
+ u8 wf_s_repeat_n;
+ u8 brake[HAP_BRAKE_PATTERN_MAX];
+ int brake_pattern_length;
+ bool brake_en;
+ bool lra_auto_res_disable;
+};
+
+struct qti_hap_play_info {
+ struct qti_hap_effect *effect;
+ u16 vmax_mv;
+ int length_us;
+ int playing_pos;
+ bool playing_pattern;
+};
+
+struct qti_hap_config {
+ enum actutor_type act_type;
+ enum lra_res_sig_shape lra_shape;
+ enum lra_auto_res_mode lra_auto_res_mode;
+ enum wf_src ext_src;
+ u16 vmax_mv;
+ u16 ilim_ma;
+ u16 play_rate_us;
+ bool lra_allow_variable_play_rate;
+ bool use_ext_wf_src;
+};
+
+struct qti_hap_chip {
+ struct platform_device *pdev;
+ struct device *dev;
+ struct regmap *regmap;
+ struct input_dev *input_dev;
+ struct pwm_device *pwm_dev;
+ struct qti_hap_config config;
+ struct qti_hap_play_info play;
+ struct qti_hap_effect *predefined;
+ struct qti_hap_effect constant;
+ struct regulator *vdd_supply;
+ struct hrtimer stop_timer;
+ struct hrtimer hap_disable_timer;
+ struct dentry *hap_debugfs;
+ spinlock_t bus_lock;
+ ktime_t last_sc_time;
+ int play_irq;
+ int sc_irq;
+ int effects_count;
+ int sc_det_count;
+ u16 reg_base;
+ bool perm_disable;
+ bool play_irq_en;
+ bool vdd_enabled;
+};
+
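+/* Pattern-repeat and per-sample-repeat counts encoded by REG_HAP_WF_REPEAT */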
+static int wf_repeat[8] = {1, 2, 4, 8, 16, 32, 64, 128};
+static int wf_s_repeat[4] = {1, 2, 4, 8};
+
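+/*
+ * Registers above the SEC_ACCESS offset (0xD0) are protected; every write to
+ * them must be preceded by unlocking REG_HAP_SEC_ACCESS with 0xA5.
+ */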
+static inline bool is_secure(u8 addr)
+{
+ return ((addr & 0xFF) > 0xD0);
+}
+
+static int qti_haptics_read(struct qti_hap_chip *chip,
+ u8 addr, u8 *val, int len)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->bus_lock, flags);
+
+ rc = regmap_bulk_read(chip->regmap, chip->reg_base + addr, val, len);
+ if (rc < 0)
+ dev_err(chip->dev, "Reading addr 0x%x failed, rc=%d\n",
+ addr, rc);
+ spin_unlock_irqrestore(&chip->bus_lock, flags);
+
+ return rc;
+}
+
+static int qti_haptics_write(struct qti_hap_chip *chip,
+ u8 addr, u8 *val, int len)
+{
+ int rc = 0, i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->bus_lock, flags);
+ if (is_secure(addr)) {
+ for (i = 0; i < len; i++) {
+ rc = regmap_write(chip->regmap,
+ chip->reg_base + REG_HAP_SEC_ACCESS,
+ 0xA5);
+ if (rc < 0) {
+ dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n",
+ rc);
+ goto unlock;
+ }
+
+ rc = regmap_write(chip->regmap,
+ chip->reg_base + addr + i, val[i]);
+ if (rc < 0) {
+ dev_err(chip->dev, "write val 0x%x to addr 0x%x failed, rc=%d\n",
+ val[i], addr + i, rc);
+ goto unlock;
+ }
+ }
+ } else {
+ if (len > 1)
+ rc = regmap_bulk_write(chip->regmap,
+ chip->reg_base + addr, val, len);
+ else
+ rc = regmap_write(chip->regmap,
+ chip->reg_base + addr, *val);
+
+ if (rc < 0)
+ dev_err(chip->dev, "write addr 0x%x failed, rc=%d\n",
+ addr, rc);
+ }
+
+ for (i = 0; i < len; i++)
+ dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x\n",
+ addr + i, val[i]);
+
+unlock:
+ spin_unlock_irqrestore(&chip->bus_lock, flags);
+ return rc;
+}
+
+static int qti_haptics_masked_write(struct qti_hap_chip *chip, u8 addr,
+ u8 mask, u8 val)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->bus_lock, flags);
+ if (is_secure(addr)) {
+ rc = regmap_write(chip->regmap,
+ chip->reg_base + REG_HAP_SEC_ACCESS,
+ 0xA5);
+ if (rc < 0) {
+ dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n",
+ rc);
+ goto unlock;
+ }
+ }
+
+ rc = regmap_update_bits(chip->regmap, chip->reg_base + addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ addr, val, mask, rc);
+
+ dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x\n",
+ addr, val, mask);
+unlock:
+ spin_unlock_irqrestore(&chip->bus_lock, flags);
+
+ return rc;
+}
+
+static void construct_constant_waveform_in_pattern(
+ struct qti_hap_play_info *play)
+{
+ struct qti_hap_chip *chip = container_of(play,
+ struct qti_hap_chip, play);
+ struct qti_hap_config *config = &chip->config;
+ struct qti_hap_effect *effect = play->effect;
+ int total_samples, samples, left, magnitude, i, j, k;
+ int delta = INT_MAX, delta_min = INT_MAX;
+
+ /* Using play_rate_us in config for constant waveform */
+ effect->play_rate_us = config->play_rate_us;
+ total_samples = play->length_us / effect->play_rate_us;
+ left = play->length_us % effect->play_rate_us;
+
+ if (total_samples <= HAP_WAVEFORM_BUFFER_MAX) {
+ effect->pattern_length = total_samples;
+ effect->wf_s_repeat_n = 0;
+ effect->wf_repeat_n = 0;
+ } else {
+ /*
+		 * Find the closest setting that achieves a constant waveform
+		 * of the required length using the buffer waveform source:
+ * play_length_us = pattern_length * wf_s_repeat_n
+ * * wf_repeat_n * play_rate_us
+ */
+ for (i = 0; i < ARRAY_SIZE(wf_repeat); i++) {
+ for (j = 0; j < ARRAY_SIZE(wf_s_repeat); j++) {
+ for (k = 1; k <= HAP_WAVEFORM_BUFFER_MAX; k++) {
+ samples = k * wf_s_repeat[j] *
+ wf_repeat[i];
+ delta = abs(total_samples - samples);
+ if (delta < delta_min) {
+ delta_min = delta;
+ effect->pattern_length = k;
+ effect->wf_s_repeat_n = j;
+ effect->wf_repeat_n = i;
+ }
+ if (samples > total_samples)
+ break;
+ }
+ }
+ }
+ }
+
+ if (left > 0 && effect->pattern_length < HAP_WAVEFORM_BUFFER_MAX)
+ effect->pattern_length++;
+
+ play->length_us = effect->pattern_length * effect->play_rate_us;
+ dev_dbg(chip->dev, "total_samples = %d, pattern_length = %d, wf_s_repeat = %d, wf_repeat = %d\n",
+ total_samples, effect->pattern_length,
+ wf_s_repeat[effect->wf_s_repeat_n],
+ wf_repeat[effect->wf_repeat_n]);
+
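+	/*
+	 * Fill every sample at the requested amplitude; Vmax is quantized
+	 * in HAP_VMAX_MV_LSB (116 mV) steps and placed in WF_Sx bits 5:1.
+	 */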
+ for (i = 0; i < effect->pattern_length; i++) {
+ magnitude = play->vmax_mv / HAP_VMAX_MV_LSB;
+ effect->pattern[i] = (u8)magnitude << HAP_WF_AMP_SHIFT;
+ }
+}
+
+static int qti_haptics_config_wf_buffer(struct qti_hap_chip *chip)
+{
+ struct qti_hap_play_info *play = &chip->play;
+ struct qti_hap_effect *effect = play->effect;
+ u8 addr, pattern[HAP_WAVEFORM_BUFFER_MAX] = {0};
+ int rc = 0;
+ size_t len;
+
+ if (play->playing_pos == effect->pattern_length) {
+ dev_dbg(chip->dev, "pattern playing done\n");
+ return 0;
+ }
+
+ if (effect->pattern_length - play->playing_pos
+ >= HAP_WAVEFORM_BUFFER_MAX)
+ len = HAP_WAVEFORM_BUFFER_MAX;
+ else
+ len = effect->pattern_length - play->playing_pos;
+
+	dev_dbg(chip->dev, "copy %d bytes starting at %d\n",
+ (int)len, play->playing_pos);
+ memcpy(pattern, &effect->pattern[play->playing_pos], len);
+
+ play->playing_pos += len;
+
+ addr = REG_HAP_WF_S1;
+	rc = qti_haptics_write(chip, addr, pattern,
+ HAP_WAVEFORM_BUFFER_MAX);
+ if (rc < 0)
+ dev_err(chip->dev, "Program WF_SAMPLE failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qti_haptics_config_wf_repeat(struct qti_hap_chip *chip)
+{
+ struct qti_hap_effect *effect = chip->play.effect;
+ u8 addr, mask, val;
+ int rc = 0;
+
+ addr = REG_HAP_WF_REPEAT;
+ mask = HAP_WF_REPEAT_MASK | HAP_WF_S_REPEAT_MASK;
+ val = effect->wf_repeat_n << HAP_WF_REPEAT_SHIFT;
+ val |= effect->wf_s_repeat_n;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "Program WF_REPEAT failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qti_haptics_play(struct qti_hap_chip *chip, bool play)
+{
+ int rc = 0;
+ u8 val = play ? HAP_PLAY_BIT : 0;
+
+ rc = qti_haptics_write(chip,
+ REG_HAP_PLAY, &val, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "%s playing haptics failed, rc=%d\n",
+ play ? "start" : "stop", rc);
+
+ return rc;
+}
+
+static int qti_haptics_module_en(struct qti_hap_chip *chip, bool en)
+{
+ int rc = 0;
+ u8 val = en ? HAP_EN_BIT : 0;
+
+ rc = qti_haptics_write(chip,
+ REG_HAP_EN_CTL1, &val, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "%s haptics failed, rc=%d\n",
+ en ? "enable" : "disable", rc);
+
+ return rc;
+}
+
+static int qti_haptics_config_vmax(struct qti_hap_chip *chip, int vmax_mv)
+{
+ u8 addr, mask, val;
+ int rc;
+
+ addr = REG_HAP_VMAX_CFG;
+ mask = HAP_VMAX_MV_MASK;
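+	/* Vmax is programmed in HAP_VMAX_MV_LSB (116 mV) steps in bits 5:1 */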
+ val = (vmax_mv / HAP_VMAX_MV_LSB) << HAP_VMAX_MV_SHIFT;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "write VMAX_CFG failed, rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int qti_haptics_config_wf_src(struct qti_hap_chip *chip,
+ enum wf_src src)
+{
+ u8 addr, mask, val = 0;
+ int rc;
+
+ addr = REG_HAP_SEL;
+ mask = HAP_WF_SOURCE_MASK | HAP_WF_TRIGGER_BIT;
+ val = src << HAP_WF_SOURCE_SHIFT;
+ if (src == EXT_WF_AUDIO || src == EXT_WF_PWM)
+ val |= HAP_WF_TRIGGER_BIT;
+
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "set HAP_SEL failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qti_haptics_config_play_rate_us(struct qti_hap_chip *chip,
+ int play_rate_us)
+{
+ u8 addr, val[2];
+ int tmp, rc;
+
+ addr = REG_HAP_RATE_CFG1;
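+	/*
+	 * The play rate is programmed in HAP_PLAY_RATE_US_LSB (5 us) units:
+	 * 8 LSBs go into RATE_CFG1, the remaining 4 bits into RATE_CFG2.
+	 */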
+ tmp = play_rate_us / HAP_PLAY_RATE_US_LSB;
+ val[0] = tmp & 0xff;
+ val[1] = (tmp >> 8) & 0xf;
+ rc = qti_haptics_write(chip, addr, val, 2);
+ if (rc < 0)
+ dev_err(chip->dev, "write play_rate failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qti_haptics_brake_enable(struct qti_hap_chip *chip, bool en)
+{
+ u8 addr, mask, val;
+ int rc;
+
+ addr = REG_HAP_EN_CTL2;
+ mask = HAP_BRAKE_EN_BIT;
+ val = en ? HAP_BRAKE_EN_BIT : 0;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "write BRAKE_EN failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake)
+{
+ u8 addr, val;
+ int i, rc;
+
+ addr = REG_HAP_BRAKE;
+ for (val = 0, i = 0; i < HAP_BRAKE_PATTERN_MAX; i++)
+ val |= (brake[i] & HAP_BRAKE_PATTERN_MASK) <<
+ i * HAP_BRAKE_PATTERN_SHIFT;
+
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "write brake pattern failed, rc=%d\n", rc);
+ return rc;
+ }
+ /*
+	 * Set BRAKE_EN regardless of the brake pattern; this helps stop
+	 * playback immediately once the valid values in WF_Sx have been played.
+ */
+ rc = qti_haptics_brake_enable(chip, true);
+
+ return rc;
+}
+
+static int qti_haptics_lra_auto_res_enable(struct qti_hap_chip *chip, bool en)
+{
+ int rc;
+ u8 addr, val, mask;
+
+ addr = REG_HAP_AUTO_RES_CTRL;
+ mask = HAP_AUTO_RES_EN_BIT;
+ val = en ? HAP_AUTO_RES_EN_BIT : 0;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "set AUTO_RES_CTRL failed, rc=%d\n", rc);
+
+ return rc;
+}
+
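+/*
+ * Flush any leftover waveform state by playing a single minimal sample at
+ * the fastest rate with brake and auto-resonance disabled.
+ */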
+#define HAP_CLEAR_PLAYING_RATE_US 15
+static int qti_haptics_clear_settings(struct qti_hap_chip *chip)
+{
+ int rc;
+ u8 pattern[HAP_WAVEFORM_BUFFER_MAX] = {1, 0, 0, 0, 0, 0, 0, 0};
+
+ rc = qti_haptics_brake_enable(chip, false);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_lra_auto_res_enable(chip, false);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_config_play_rate_us(chip, HAP_CLEAR_PLAYING_RATE_US);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_write(chip, REG_HAP_WF_S1, pattern,
+ HAP_WAVEFORM_BUFFER_MAX);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_play(chip, true);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_play(chip, false);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip)
+{
+ struct qti_hap_play_info *play = &chip->play;
+ struct qti_hap_config *config = &chip->config;
+ int rc = 0;
+
+ rc = qti_haptics_config_play_rate_us(chip, config->play_rate_us);
+ if (rc < 0)
+ return rc;
+ /*
+	 * Use the VMAX waveform source if the playing length is >= 20 ms;
+	 * otherwise use the buffer waveform source and calculate the
+	 * pattern length and repeat counts to achieve an accurate
+	 * playing time.
+ */
+ if (play->length_us >= VMAX_MIN_PLAY_TIME_US) {
+ rc = qti_haptics_config_vmax(chip, play->vmax_mv);
+ if (rc < 0)
+ return rc;
+
+ /* Enable Auto-Resonance when VMAX wf-src is selected */
+ if (config->act_type == ACT_LRA) {
+ rc = qti_haptics_lra_auto_res_enable(chip, true);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* Set WF_SOURCE to VMAX */
+ rc = qti_haptics_config_wf_src(chip, INT_WF_VMAX);
+ if (rc < 0)
+ return rc;
+
+ play->playing_pattern = false;
+ play->effect = NULL;
+ } else {
+ rc = qti_haptics_config_vmax(chip, config->vmax_mv);
+ if (rc < 0)
+ return rc;
+
+ play->effect = &chip->constant;
+ play->playing_pos = 0;
+ /* Format and config waveform in patterns */
+ construct_constant_waveform_in_pattern(play);
+ rc = qti_haptics_config_wf_buffer(chip);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_config_wf_repeat(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Set WF_SOURCE to buffer */
+ rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER);
+ if (rc < 0)
+ return rc;
+
+ play->playing_pattern = true;
+ }
+
+ return 0;
+}
+
+static int qti_haptics_load_predefined_effect(struct qti_hap_chip *chip,
+ int effect_idx)
+{
+ struct qti_hap_play_info *play = &chip->play;
+ struct qti_hap_config *config = &chip->config;
+ int rc = 0;
+
+ if (effect_idx >= chip->effects_count)
+ return -EINVAL;
+
+ play->effect = &chip->predefined[effect_idx];
+ play->playing_pos = 0;
+ rc = qti_haptics_config_vmax(chip, play->vmax_mv);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_config_play_rate_us(chip, play->effect->play_rate_us);
+ if (rc < 0)
+ return rc;
+
+ if (config->act_type == ACT_LRA) {
+ rc = qti_haptics_lra_auto_res_enable(chip,
+ !play->effect->lra_auto_res_disable);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* Set brake pattern in the effect */
+ rc = qti_haptics_config_brake(chip, play->effect->brake);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_config_wf_buffer(chip);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_config_wf_repeat(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Set WF_SOURCE to buffer */
+ rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER);
+ if (rc < 0)
+ return rc;
+
+ play->playing_pattern = true;
+
+ return 0;
+}
+
+static irqreturn_t qti_haptics_play_irq_handler(int irq, void *data)
+{
+ struct qti_hap_chip *chip = (struct qti_hap_chip *)data;
+ struct qti_hap_play_info *play = &chip->play;
+ struct qti_hap_effect *effect = play->effect;
+ int rc;
+
+ dev_dbg(chip->dev, "play_irq triggered\n");
+ if (play->playing_pos == effect->pattern_length) {
+ dev_dbg(chip->dev, "waveform playing done\n");
+ if (chip->play_irq_en) {
+ disable_irq_nosync(chip->play_irq);
+ chip->play_irq_en = false;
+ }
+
+ goto handled;
+ }
+
+ /* Config to play remaining patterns */
+ rc = qti_haptics_config_wf_repeat(chip);
+ if (rc < 0)
+ goto handled;
+
+ rc = qti_haptics_config_wf_buffer(chip);
+ if (rc < 0)
+ goto handled;
+
+handled:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qti_haptics_sc_irq_handler(int irq, void *data)
+{
+ struct qti_hap_chip *chip = (struct qti_hap_chip *)data;
+ u8 addr, val;
+ ktime_t temp;
+ s64 sc_delta_time_us;
+ int rc;
+
+ dev_dbg(chip->dev, "sc_irq triggered\n");
+ addr = REG_HAP_STATUS1;
+ rc = qti_haptics_read(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "read HAP_STATUS1 failed, rc=%d\n", rc);
+ goto handled;
+ }
+
+ if (!(val & HAP_SC_DET_BIT))
+ goto handled;
+
+ temp = ktime_get();
+ sc_delta_time_us = ktime_us_delta(temp, chip->last_sc_time);
+ chip->last_sc_time = temp;
+
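+	/*
+	 * Count short-circuit events that arrive within HAP_SC_DET_TIME_US
+	 * of each other; a persistent fault disables the module below.
+	 */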
+ if (sc_delta_time_us > HAP_SC_DET_TIME_US)
+ chip->sc_det_count = 0;
+ else
+ chip->sc_det_count++;
+
+ addr = REG_HAP_SC_CLR;
+ val = HAP_SC_CLR_BIT;
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "write SC_CLR failed, rc=%d\n", rc);
+ goto handled;
+ }
+
+ if (chip->sc_det_count > HAP_SC_DET_MAX_COUNT) {
+ rc = qti_haptics_module_en(chip, false);
+ if (rc < 0)
+ goto handled;
+
+ dev_crit(chip->dev, "Short circuit persists, disable haptics\n");
+ chip->perm_disable = true;
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+static inline void get_play_length(struct qti_hap_play_info *play,
+ int *length_us)
+{
+ struct qti_hap_effect *effect = play->effect;
+ int tmp;
+
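+	/*
+	 * Total play time = samples * play rate * sample repeat * pattern
+	 * repeat, plus the brake pattern duration when braking is enabled.
+	 */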
+ tmp = effect->pattern_length * effect->play_rate_us;
+ tmp *= wf_s_repeat[effect->wf_s_repeat_n];
+ tmp *= wf_repeat[effect->wf_repeat_n];
+ if (effect->brake_en)
+ tmp += effect->play_rate_us * effect->brake_pattern_length;
+
+ *length_us = tmp;
+}
+
+static int qti_haptics_upload_effect(struct input_dev *dev,
+ struct ff_effect *effect, struct ff_effect *old)
+{
+ struct qti_hap_chip *chip = input_get_drvdata(dev);
+ struct qti_hap_config *config = &chip->config;
+ struct qti_hap_play_info *play = &chip->play;
+ int rc = 0, tmp, i;
+ s16 level, data[CUSTOM_DATA_LEN];
+ ktime_t rem;
+ s64 time_us;
+
+ if (hrtimer_active(&chip->hap_disable_timer)) {
+ rem = hrtimer_get_remaining(&chip->hap_disable_timer);
+ time_us = ktime_to_us(rem);
+ dev_dbg(chip->dev, "waiting for playing clear sequence: %lld us\n",
+ time_us);
+ usleep_range(time_us, time_us + 100);
+ }
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+ play->length_us = effect->replay.length * USEC_PER_MSEC;
+ level = effect->u.constant.level;
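+		/* Scale the 0..0x7fff FF level to the configured Vmax in mV */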
+ tmp = level * config->vmax_mv;
+ play->vmax_mv = tmp / 0x7fff;
+ dev_dbg(chip->dev, "upload constant effect, length = %dus, vmax_mv=%d\n",
+ play->length_us, play->vmax_mv);
+
+ rc = qti_haptics_load_constant_waveform(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Play constant waveform failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+
+ case FF_PERIODIC:
+ if (chip->effects_count == 0)
+ return -EINVAL;
+
+ if (effect->u.periodic.waveform != FF_CUSTOM) {
+ dev_err(chip->dev, "Only accept custom waveforms\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(data, effect->u.periodic.custom_data,
+ sizeof(s16) * CUSTOM_DATA_LEN))
+ return -EFAULT;
+
+ for (i = 0; i < chip->effects_count; i++)
+ if (chip->predefined[i].id ==
+ data[CUSTOM_DATA_EFFECT_IDX])
+ break;
+
+ if (i == chip->effects_count) {
+ dev_err(chip->dev, "predefined effect %d is NOT supported\n",
+ data[0]);
+ return -EINVAL;
+ }
+
+ level = effect->u.periodic.magnitude;
+ tmp = level * chip->predefined[i].vmax_mv;
+ play->vmax_mv = tmp / 0x7fff;
+
+ dev_dbg(chip->dev, "upload effect %d, vmax_mv=%d\n",
+ chip->predefined[i].id, play->vmax_mv);
+ rc = qti_haptics_load_predefined_effect(chip, i);
+ if (rc < 0) {
+ dev_err(chip->dev, "Play predefined effect %d failed, rc=%d\n",
+ chip->predefined[i].id, rc);
+ return rc;
+ }
+
+ get_play_length(play, &play->length_us);
+ data[CUSTOM_DATA_TIMEOUT_SEC_IDX] =
+ play->length_us / USEC_PER_SEC;
+ data[CUSTOM_DATA_TIMEOUT_MSEC_IDX] =
+ (play->length_us % USEC_PER_SEC) / USEC_PER_MSEC;
+
+ /*
+		 * Copy the custom data containing the play length back to
+		 * userspace so that the userspace client can wait and
+		 * send the stop-playing command once playback is done.
+ */
+ if (copy_to_user(effect->u.periodic.custom_data, data,
+ sizeof(s16) * CUSTOM_DATA_LEN))
+ return -EFAULT;
+ break;
+
+ default:
+ dev_err(chip->dev, "Unsupported effect type: %d\n",
+ effect->type);
+ return -EINVAL;
+ }
+
+ if (chip->vdd_supply && !chip->vdd_enabled) {
+ rc = regulator_enable(chip->vdd_supply);
+ if (rc < 0) {
+ dev_err(chip->dev, "Enable VDD supply failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ chip->vdd_enabled = true;
+ }
+
+ return 0;
+}
+
+static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val)
+{
+ struct qti_hap_chip *chip = input_get_drvdata(dev);
+ struct qti_hap_play_info *play = &chip->play;
+ s64 secs;
+ unsigned long nsecs;
+ int rc = 0;
+
+ dev_dbg(chip->dev, "playback, val = %d\n", val);
+ if (!!val) {
+ rc = qti_haptics_module_en(chip, true);
+ if (rc < 0)
+ return rc;
+
+ rc = qti_haptics_play(chip, true);
+ if (rc < 0)
+ return rc;
+
+ if (play->playing_pattern) {
+ if (!chip->play_irq_en) {
+ enable_irq(chip->play_irq);
+ chip->play_irq_en = true;
+ }
+ /* Toggle PLAY when playing pattern */
+ rc = qti_haptics_play(chip, false);
+ if (rc < 0)
+ return rc;
+ } else {
+ if (chip->play_irq_en) {
+ disable_irq_nosync(chip->play_irq);
+ chip->play_irq_en = false;
+ }
+ secs = play->length_us / USEC_PER_SEC;
+ nsecs = (play->length_us % USEC_PER_SEC) *
+ NSEC_PER_USEC;
+ hrtimer_start(&chip->stop_timer, ktime_set(secs, nsecs),
+ HRTIMER_MODE_REL);
+ }
+ } else {
+ play->length_us = 0;
+ rc = qti_haptics_play(chip, false);
+ if (rc < 0)
+ return rc;
+
+ if (chip->play_irq_en) {
+ disable_irq_nosync(chip->play_irq);
+ chip->play_irq_en = false;
+ }
+ }
+
+ return rc;
+}
+
+static int qti_haptics_erase(struct input_dev *dev, int effect_id)
+{
+ struct qti_hap_chip *chip = input_get_drvdata(dev);
+ int delay_us, rc = 0;
+
+ if (chip->vdd_supply && chip->vdd_enabled) {
+ rc = regulator_disable(chip->vdd_supply);
+ if (rc < 0) {
+ dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ chip->vdd_enabled = false;
+ }
+
+ rc = qti_haptics_clear_settings(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "clear setting failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->play.effect)
+ delay_us = chip->play.effect->play_rate_us;
+ else
+ delay_us = chip->config.play_rate_us;
+
+ delay_us += HAP_DISABLE_DELAY_USEC;
+ hrtimer_start(&chip->hap_disable_timer,
+ ktime_set(0, delay_us * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ return rc;
+}
+
+static void qti_haptics_set_gain(struct input_dev *dev, u16 gain)
+{
+ struct qti_hap_chip *chip = input_get_drvdata(dev);
+ struct qti_hap_config *config = &chip->config;
+ struct qti_hap_play_info *play = &chip->play;
+
+ if (gain == 0)
+ return;
+
+ if (gain > 0x7fff)
+ gain = 0x7fff;
+
+ play->vmax_mv = ((u32)(gain * config->vmax_mv)) / 0x7fff;
+ qti_haptics_config_vmax(chip, play->vmax_mv);
+}
+
+static int qti_haptics_hw_init(struct qti_hap_chip *chip)
+{
+ struct qti_hap_config *config = &chip->config;
+ u8 addr, val, mask;
+ int rc = 0;
+
+ /* Config actuator type */
+ addr = REG_HAP_CFG1;
+ val = config->act_type;
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "write actuator type failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Config ilim_ma */
+ addr = REG_HAP_ILIM_CFG;
+ val = config->ilim_ma == 400 ? 0 : 1;
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "write ilim_ma failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Set HAP_EN_CTL3 */
+ addr = REG_HAP_EN_CTL3;
+ val = HAP_HBRIDGE_EN_BIT | HAP_PWM_SIGNAL_EN_BIT | HAP_ILIM_EN_BIT |
+ HAP_ILIM_CC_EN_BIT | HAP_AUTO_RES_RBIAS_EN_BIT |
+ HAP_DAC_EN_BIT | HAP_PWM_CTL_EN_BIT;
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "set EN_CTL3 failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Set ZX_CFG */
+ addr = REG_HAP_ZX_CFG;
+ mask = HAP_ZX_DET_DEB_MASK;
+ val = ZX_DET_DEB_80US;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0) {
+ dev_err(chip->dev, "write ZX_CFG failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Config play rate: this is the resonance period for LRA,
+ * or the play duration of each waveform sample for ERM.
+ */
+ rc = qti_haptics_config_play_rate_us(chip, config->play_rate_us);
+ if (rc < 0)
+ return rc;
+
+ /* Set external waveform source if it's used */
+ if (config->use_ext_wf_src) {
+ rc = qti_haptics_config_wf_src(chip, config->ext_src);
+ if (rc < 0)
+ return rc;
+ }
+
+ /*
+	 * Skip the configurations below for ERM actuators,
+	 * as they only apply to LRA actuators.
+ */
+ if (config->act_type == ACT_ERM)
+ return 0;
+
+ addr = REG_HAP_CFG2;
+ val = config->lra_shape;
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "write lra_sig_shape failed, rc=%d\n", rc);
+ return rc;
+ }
+
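+	/* LRA only: configure auto-resonance detection mode and calibration */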
+ addr = REG_HAP_AUTO_RES_CFG;
+ mask = HAP_AUTO_RES_MODE_BIT | HAP_CAL_EOP_EN_BIT | HAP_CAL_PERIOD_MASK;
+ val = config->lra_auto_res_mode << HAP_AUTO_RES_MODE_SHIFT;
+ val |= HAP_CAL_EOP_EN_BIT | HAP_CAL_OPT3_EVERY_8_PERIOD;
+ rc = qti_haptics_masked_write(chip, addr, mask, val);
+ if (rc < 0) {
+ dev_err(chip->dev, "set AUTO_RES_CFG failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ addr = REG_HAP_AUTO_RES_CTRL;
+ val = HAP_AUTO_RES_EN_BIT | HAP_SEL_AUTO_RES_PERIOD |
+ AUTO_RES_CNT_ERR_DELTA(2) | HAP_AUTO_RES_ERR_RECOVERY_BIT |
+ AUTO_RES_EN_DLY(4);
+ rc = qti_haptics_write(chip, addr, &val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "set AUTO_RES_CTRL failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static enum hrtimer_restart qti_hap_stop_timer(struct hrtimer *timer)
+{
+ struct qti_hap_chip *chip = container_of(timer, struct qti_hap_chip,
+ stop_timer);
+ int rc;
+
+ chip->play.length_us = 0;
+ rc = qti_haptics_play(chip, false);
+ if (rc < 0)
+ dev_err(chip->dev, "Stop playing failed, rc=%d\n", rc);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart qti_hap_disable_timer(struct hrtimer *timer)
+{
+ struct qti_hap_chip *chip = container_of(timer, struct qti_hap_chip,
+ hap_disable_timer);
+ int rc;
+
+ rc = qti_haptics_module_en(chip, false);
+ if (rc < 0)
+ dev_err(chip->dev, "Disable haptics module failed, rc=%d\n",
+ rc);
+
+ return HRTIMER_NORESTART;
+}
+
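+/*
+ * Trim trailing zero entries from the brake pattern and determine whether
+ * braking needs to be enabled at all.
+ */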
+static void verify_brake_setting(struct qti_hap_effect *effect)
+{
+ int i = effect->brake_pattern_length - 1;
+ u8 val = 0;
+
+ for (; i >= 0; i--) {
+ if (effect->brake[i] != 0)
+ break;
+
+ effect->brake_pattern_length--;
+ }
+
+ for (i = 0; i < effect->brake_pattern_length; i++) {
+ effect->brake[i] &= HAP_BRAKE_PATTERN_MASK;
+ val |= effect->brake[i] << (i * HAP_BRAKE_PATTERN_SHIFT);
+ }
+
+ effect->brake_en = (val != 0);
+}
+
+static int qti_haptics_parse_dt_per_effect(struct qti_hap_chip *chip)
+{
+ const struct device_node *node = chip->dev->of_node;
+ struct device_node *child_node;
+ struct qti_hap_config *config = &chip->config;
+ struct qti_hap_effect *effect;
+ int rc, i = 0, j, m;
+ u32 tmp;
+
+ for_each_available_child_of_node(node, child_node) {
+ effect = &chip->predefined[i++];
+ rc = of_property_read_u32(child_node, "qcom,effect-id",
+ &effect->id);
+ if (rc < 0) {
+ dev_err(chip->dev, "Read qcom,effect-id failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ effect->vmax_mv = config->vmax_mv;
+ rc = of_property_read_u32(child_node, "qcom,wf-vmax-mv", &tmp);
+ if (rc < 0)
+ dev_dbg(chip->dev, "Read qcom,wf-vmax-mv failed, rc=%d\n",
+ rc);
+ else
+ effect->vmax_mv = (tmp > HAP_VMAX_MV_MAX) ?
+ HAP_VMAX_MV_MAX : tmp;
+
+ rc = of_property_count_elems_of_size(child_node,
+ "qcom,wf-pattern", sizeof(u8));
+ if (rc < 0) {
+ dev_err(chip->dev, "Count qcom,wf-pattern property failed, rc=%d\n",
+ rc);
+ return rc;
+ } else if (rc == 0) {
+ dev_dbg(chip->dev, "qcom,wf-pattern has no data\n");
+ return -EINVAL;
+ }
+
+ effect->pattern_length = rc;
+ effect->pattern = devm_kcalloc(chip->dev,
+ effect->pattern_length, sizeof(u8), GFP_KERNEL);
+ if (!effect->pattern)
+ return -ENOMEM;
+
+ rc = of_property_read_u8_array(child_node, "qcom,wf-pattern",
+ effect->pattern, effect->pattern_length);
+ if (rc < 0) {
+ dev_err(chip->dev, "Read qcom,wf-pattern property failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ effect->play_rate_us = config->play_rate_us;
+ rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us",
+ &tmp);
+ if (rc < 0)
+ dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n",
+ rc);
+ else
+ effect->play_rate_us = tmp;
+
+ if (config->act_type == ACT_LRA &&
+ !config->lra_allow_variable_play_rate &&
+ config->play_rate_us != effect->play_rate_us) {
+			dev_warn(chip->dev, "play rate should match the LRA resonance frequency\n");
+ effect->play_rate_us = config->play_rate_us;
+ }
+
+ rc = of_property_read_u32(child_node, "qcom,wf-repeat-count",
+ &tmp);
+ if (rc < 0) {
+ dev_dbg(chip->dev, "Read qcom,wf-repeat-count failed, rc=%d\n",
+ rc);
+ } else {
+ for (j = 0; j < ARRAY_SIZE(wf_repeat); j++)
+ if (tmp <= wf_repeat[j])
+ break;
+
+ effect->wf_repeat_n = j;
+ }
+
+ rc = of_property_read_u32(child_node, "qcom,wf-s-repeat-count",
+ &tmp);
+ if (rc < 0) {
+ dev_dbg(chip->dev, "Read qcom,wf-s-repeat-count failed, rc=%d\n",
+ rc);
+ } else {
+ for (j = 0; j < ARRAY_SIZE(wf_s_repeat); j++)
+ if (tmp <= wf_s_repeat[j])
+ break;
+
+ effect->wf_s_repeat_n = j;
+ }
+
+ effect->lra_auto_res_disable = of_property_read_bool(child_node,
+ "qcom,lra-auto-resonance-disable");
+
+ tmp = of_property_count_elems_of_size(child_node,
+ "qcom,wf-brake-pattern", sizeof(u8));
+ if (tmp <= 0)
+ continue;
+
+ if (tmp > HAP_BRAKE_PATTERN_MAX) {
+ dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n",
+ HAP_BRAKE_PATTERN_MAX);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u8_array(child_node,
+ "qcom,wf-brake-pattern", effect->brake, tmp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ effect->brake_pattern_length = tmp;
+ verify_brake_setting(effect);
+ }
+
+ for (j = 0; j < i; j++) {
+ dev_dbg(chip->dev, "effect: %d\n", chip->predefined[j].id);
+ dev_dbg(chip->dev, " vmax: %d mv\n",
+ chip->predefined[j].vmax_mv);
+ dev_dbg(chip->dev, " play_rate: %d us\n",
+ chip->predefined[j].play_rate_us);
+ for (m = 0; m < chip->predefined[j].pattern_length; m++)
+ dev_dbg(chip->dev, " pattern[%d]: 0x%x\n",
+ m, chip->predefined[j].pattern[m]);
+ for (m = 0; m < chip->predefined[j].brake_pattern_length; m++)
+ dev_dbg(chip->dev, " brake_pattern[%d]: 0x%x\n",
+ m, chip->predefined[j].brake[m]);
+ dev_dbg(chip->dev, " brake_en: %d\n",
+ chip->predefined[j].brake_en);
+ dev_dbg(chip->dev, " wf_repeat_n: %d\n",
+ chip->predefined[j].wf_repeat_n);
+ dev_dbg(chip->dev, " wf_s_repeat_n: %d\n",
+ chip->predefined[j].wf_s_repeat_n);
+ dev_dbg(chip->dev, " lra_auto_res_disable: %d\n",
+ chip->predefined[j].lra_auto_res_disable);
+ }
+
+ return 0;
+}
+
+static int qti_haptics_lra_parse_dt(struct qti_hap_chip *chip)
+{
+ struct qti_hap_config *config = &chip->config;
+ const struct device_node *node = chip->dev->of_node;
+ const char *str;
+ int rc;
+
+ if (config->act_type != ACT_LRA)
+ return 0;
+
+ config->lra_shape = RES_SIG_SINE;
+ rc = of_property_read_string(node,
+ "qcom,lra-resonance-sig-shape", &str);
+ if (!rc) {
+ if (strcmp(str, "sine") == 0) {
+ config->lra_shape = RES_SIG_SINE;
+ } else if (strcmp(str, "square") == 0) {
+ config->lra_shape = RES_SIG_SQUARE;
+ } else {
+ dev_err(chip->dev, "Invalid resonance signal shape: %s\n",
+ str);
+ return -EINVAL;
+ }
+ }
+
+ config->lra_allow_variable_play_rate = of_property_read_bool(node,
+ "qcom,lra-allow-variable-play-rate");
+
+ config->lra_auto_res_mode = AUTO_RES_MODE_ZXD;
+ rc = of_property_read_string(node, "qcom,lra-auto-resonance-mode",
+ &str);
+ if (!rc) {
+ if (strcmp(str, "zxd") == 0) {
+ config->lra_auto_res_mode = AUTO_RES_MODE_ZXD;
+ } else if (strcmp(str, "qwd") == 0) {
+ config->lra_auto_res_mode = AUTO_RES_MODE_QWD;
+ } else {
+ dev_err(chip->dev, "Invalid auto resonance mode: %s\n",
+ str);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int qti_haptics_parse_dt(struct qti_hap_chip *chip)
+{
+ struct qti_hap_config *config = &chip->config;
+ const struct device_node *node = chip->dev->of_node;
+ const char *str;
+ int rc = 0, tmp;
+
+ rc = of_property_read_u32(node, "reg", &tmp);
+ if (rc < 0) {
+		dev_err(chip->dev, "Failed to read reg base, rc=%d\n", rc);
+ return rc;
+ }
+ chip->reg_base = (u16)tmp;
+
+ chip->sc_irq = platform_get_irq_byname(chip->pdev, "hap-sc-irq");
+ if (chip->sc_irq < 0) {
+ dev_err(chip->dev, "Failed to get hap-sc-irq\n");
+ return chip->sc_irq;
+ }
+
+ chip->play_irq = platform_get_irq_byname(chip->pdev, "hap-play-irq");
+ if (chip->play_irq < 0) {
+ dev_err(chip->dev, "Failed to get hap-play-irq\n");
+ return chip->play_irq;
+ }
+
+ config->act_type = ACT_LRA;
+ rc = of_property_read_string(node, "qcom,actuator-type", &str);
+ if (!rc) {
+ if (strcmp(str, "erm") == 0) {
+ config->act_type = ACT_ERM;
+ } else if (strcmp(str, "lra") == 0) {
+ config->act_type = ACT_LRA;
+ } else {
+ dev_err(chip->dev, "Invalid actuator type: %s\n",
+ str);
+ return -EINVAL;
+ }
+ }
+
+ config->vmax_mv = HAP_VMAX_MV_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vmax-mv", &tmp);
+ if (!rc)
+ config->vmax_mv = (tmp > HAP_VMAX_MV_MAX) ?
+ HAP_VMAX_MV_MAX : tmp;
+
+ config->ilim_ma = HAP_ILIM_MA_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,ilim-ma", &tmp);
+ if (!rc)
+ config->ilim_ma = (tmp >= HAP_ILIM_MA_MAX) ?
+ HAP_ILIM_MA_MAX : HAP_ILIM_MA_DEFAULT;
+
+ config->play_rate_us = HAP_PLAY_RATE_US_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,play-rate-us", &tmp);
+ if (!rc)
+ config->play_rate_us = (tmp >= HAP_PLAY_RATE_US_MAX) ?
+ HAP_PLAY_RATE_US_MAX : tmp;
+
+ if (of_find_property(node, "qcom,external-waveform-source", NULL)) {
+ if (!of_property_read_string(node,
+ "qcom,external-waveform-source", &str)) {
+ if (strcmp(str, "audio") == 0) {
+ config->ext_src = EXT_WF_AUDIO;
+ } else if (strcmp(str, "pwm") == 0) {
+ config->ext_src = EXT_WF_PWM;
+ } else {
+ dev_err(chip->dev, "Invalid external waveform source: %s\n",
+ str);
+ return -EINVAL;
+ }
+ }
+ config->use_ext_wf_src = true;
+ }
+
+ if (of_find_property(node, "vdd-supply", NULL)) {
+ chip->vdd_supply = devm_regulator_get(chip->dev, "vdd");
+ if (IS_ERR(chip->vdd_supply)) {
+ rc = PTR_ERR(chip->vdd_supply);
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev, "Failed to get vdd regulator\n");
+ return rc;
+ }
+ }
+
+ rc = qti_haptics_lra_parse_dt(chip);
+ if (rc < 0)
+ return rc;
+
+ chip->constant.pattern = devm_kcalloc(chip->dev,
+ HAP_WAVEFORM_BUFFER_MAX,
+ sizeof(u8), GFP_KERNEL);
+ if (!chip->constant.pattern)
+ return -ENOMEM;
+
+ tmp = of_get_available_child_count(node);
+ if (tmp == 0)
+ return 0;
+
+ chip->predefined = devm_kcalloc(chip->dev, tmp,
+ sizeof(*chip->predefined), GFP_KERNEL);
+ if (!chip->predefined)
+ return -ENOMEM;
+
+ rc = qti_haptics_parse_dt_per_effect(chip);
+ if (rc < 0)
+ return rc;
+
+ chip->effects_count = tmp;
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int play_rate_dbgfs_read(void *data, u64 *val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ *val = effect->play_rate_us;
+
+ return 0;
+}
+
+static int play_rate_dbgfs_write(void *data, u64 val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ if (val > HAP_PLAY_RATE_US_MAX)
+ val = HAP_PLAY_RATE_US_MAX;
+
+ effect->play_rate_us = val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(play_rate_debugfs_ops, play_rate_dbgfs_read,
+ play_rate_dbgfs_write, "%llu\n");
+
+static int vmax_dbgfs_read(void *data, u64 *val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ *val = effect->vmax_mv;
+
+ return 0;
+}
+
+static int vmax_dbgfs_write(void *data, u64 val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ if (val > HAP_VMAX_MV_MAX)
+ val = HAP_VMAX_MV_MAX;
+
+ effect->vmax_mv = val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(vmax_debugfs_ops, vmax_dbgfs_read,
+ vmax_dbgfs_write, "%llu\n");
+
+static int wf_repeat_n_dbgfs_read(void *data, u64 *val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ *val = wf_repeat[effect->wf_repeat_n];
+
+ return 0;
+}
+
+static int wf_repeat_n_dbgfs_write(void *data, u64 val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wf_repeat); i++)
+ if (val == wf_repeat[i])
+ break;
+
+ if (i == ARRAY_SIZE(wf_repeat))
+ pr_err("wf_repeat value %llu is invalid\n", val);
+ else
+ effect->wf_repeat_n = i;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(wf_repeat_n_debugfs_ops, wf_repeat_n_dbgfs_read,
+ wf_repeat_n_dbgfs_write, "%llu\n");
+
+static int wf_s_repeat_n_dbgfs_read(void *data, u64 *val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ *val = wf_s_repeat[effect->wf_s_repeat_n];
+
+ return 0;
+}
+
+static int wf_s_repeat_n_dbgfs_write(void *data, u64 val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wf_s_repeat); i++)
+ if (val == wf_s_repeat[i])
+ break;
+
+ if (i == ARRAY_SIZE(wf_s_repeat))
+ pr_err("wf_s_repeat value %llu is invalid\n", val);
+ else
+ effect->wf_s_repeat_n = i;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(wf_s_repeat_n_debugfs_ops, wf_s_repeat_n_dbgfs_read,
+ wf_s_repeat_n_dbgfs_write, "%llu\n");
+
+static int auto_res_dbgfs_read(void *data, u64 *val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ *val = !effect->lra_auto_res_disable;
+
+ return 0;
+}
+
+static int auto_res_dbgfs_write(void *data, u64 val)
+{
+ struct qti_hap_effect *effect = (struct qti_hap_effect *)data;
+
+ effect->lra_auto_res_disable = !val;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(auto_res_debugfs_ops, auto_res_dbgfs_read,
+ auto_res_dbgfs_write, "%llu\n");
+
+#define CHAR_PER_PATTERN 8
+static ssize_t brake_pattern_dbgfs_read(struct file *filep,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct qti_hap_effect *effect =
+ (struct qti_hap_effect *)filep->private_data;
+ char *kbuf, *tmp;
+ int rc, length, i, len;
+
+ kbuf = kcalloc(CHAR_PER_PATTERN, HAP_BRAKE_PATTERN_MAX, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ tmp = kbuf;
+ for (length = 0, i = 0; i < HAP_BRAKE_PATTERN_MAX; i++) {
+ len = snprintf(tmp, CHAR_PER_PATTERN, "0x%x ",
+ effect->brake[i]);
+ tmp += len;
+ length += len;
+ }
+
+ kbuf[length++] = '\n';
+ kbuf[length++] = '\0';
+
+ rc = simple_read_from_buffer(buf, count, ppos, kbuf, length);
+
+ kfree(kbuf);
+ return rc;
+}
+
+static ssize_t brake_pattern_dbgfs_write(struct file *filep,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct qti_hap_effect *effect =
+ (struct qti_hap_effect *)filep->private_data;
+	char *kbuf, *ptr, *token;
+ int rc = 0, i = 0, j;
+ u32 val;
+
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = copy_from_user(kbuf, buf, count);
+ if (rc > 0) {
+ rc = -EFAULT;
+ goto err;
+ }
+
+ kbuf[count] = '\0';
+	*ppos += count;
+
+	/* strsep() advances its cursor, so keep kbuf intact for kfree() */
+	ptr = kbuf;
+	while ((token = strsep(&ptr, " ")) != NULL) {
+ rc = kstrtouint(token, 0, &val);
+ if (rc < 0) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ effect->brake[i++] = val & HAP_BRAKE_PATTERN_MASK;
+
+ if (i >= HAP_BRAKE_PATTERN_MAX)
+ break;
+ }
+
+ for (j = i; j < HAP_BRAKE_PATTERN_MAX; j++)
+ effect->brake[j] = 0;
+
+ effect->brake_pattern_length = i;
+ verify_brake_setting(effect);
+
+ rc = count;
+err:
+ kfree(kbuf);
+ return rc;
+}
+
+static const struct file_operations brake_pattern_dbgfs_ops = {
+ .read = brake_pattern_dbgfs_read,
+ .write = brake_pattern_dbgfs_write,
+ .owner = THIS_MODULE,
+ .open = simple_open,
+};
+
+static ssize_t pattern_dbgfs_read(struct file *filep,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct qti_hap_effect *effect =
+ (struct qti_hap_effect *)filep->private_data;
+ char *kbuf, *tmp;
+ int rc, length, i, len;
+
+ kbuf = kcalloc(CHAR_PER_PATTERN, effect->pattern_length, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ tmp = kbuf;
+ for (length = 0, i = 0; i < effect->pattern_length; i++) {
+ len = snprintf(tmp, CHAR_PER_PATTERN, "0x%x ",
+ effect->pattern[i]);
+ tmp += len;
+ length += len;
+ }
+
+ kbuf[length++] = '\n';
+ kbuf[length++] = '\0';
+
+ rc = simple_read_from_buffer(buf, count, ppos, kbuf, length);
+
+ kfree(kbuf);
+ return rc;
+}
+
+static ssize_t pattern_dbgfs_write(struct file *filep,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct qti_hap_effect *effect =
+ (struct qti_hap_effect *)filep->private_data;
+	char *kbuf, *ptr, *token;
+ int rc = 0, i = 0, j;
+ u32 val;
+
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = copy_from_user(kbuf, buf, count);
+ if (rc > 0) {
+ rc = -EFAULT;
+ goto err;
+ }
+
+ kbuf[count] = '\0';
+	*ppos += count;
+
+	/* strsep() advances its cursor, so keep kbuf intact for kfree() */
+	ptr = kbuf;
+	while ((token = strsep(&ptr, " ")) != NULL) {
+ rc = kstrtouint(token, 0, &val);
+ if (rc < 0) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ effect->pattern[i++] = val & 0xff;
+
+ if (i >= effect->pattern_length)
+ break;
+ }
+
+ for (j = i; j < effect->pattern_length; j++)
+ effect->pattern[j] = 0;
+
+ rc = count;
+err:
+ kfree(kbuf);
+ return rc;
+}
+
+static const struct file_operations pattern_dbgfs_ops = {
+ .read = pattern_dbgfs_read,
+ .write = pattern_dbgfs_write,
+ .owner = THIS_MODULE,
+ .open = simple_open,
+};
+
+static int create_effect_debug_files(struct qti_hap_effect *effect,
+ struct dentry *dir)
+{
+ struct dentry *file;
+
+ file = debugfs_create_file("play_rate_us", 0644, dir,
+ effect, &play_rate_debugfs_ops);
+ if (!file) {
+ pr_err("create play-rate debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("vmax_mv", 0644, dir,
+ effect, &vmax_debugfs_ops);
+ if (!file) {
+ pr_err("create vmax debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("wf_repeat_n", 0644, dir,
+ effect, &wf_repeat_n_debugfs_ops);
+ if (!file) {
+ pr_err("create wf-repeat debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("wf_s_repeat_n", 0644, dir,
+ effect, &wf_s_repeat_n_debugfs_ops);
+ if (!file) {
+ pr_err("create wf-s-repeat debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("lra_auto_res_en", 0644, dir,
+ effect, &auto_res_debugfs_ops);
+ if (!file) {
+ pr_err("create lra-auto-res-en debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("brake", 0644, dir,
+ effect, &brake_pattern_dbgfs_ops);
+ if (!file) {
+ pr_err("create brake debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("pattern", 0644, dir,
+ effect, &pattern_dbgfs_ops);
+ if (!file) {
+ pr_err("create pattern debugfs node failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qti_haptics_add_debugfs(struct qti_hap_chip *chip)
+{
+ struct dentry *hap_dir, *effect_dir;
+ char str[12] = {0};
+ int i, rc = 0;
+
+ hap_dir = debugfs_create_dir("haptics", NULL);
+ if (!hap_dir) {
+ pr_err("create haptics debugfs directory failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < chip->effects_count; i++) {
+ snprintf(str, ARRAY_SIZE(str), "effect%d", i);
+ effect_dir = debugfs_create_dir(str, hap_dir);
+ if (!effect_dir) {
+ pr_err("create %s debugfs directory failed\n", str);
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ rc = create_effect_debug_files(&chip->predefined[i],
+ effect_dir);
+ if (rc < 0) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ chip->hap_debugfs = hap_dir;
+ return 0;
+
+cleanup:
+ debugfs_remove_recursive(hap_dir);
+ return rc;
+}
+#endif
+
+static int qti_haptics_probe(struct platform_device *pdev)
+{
+ struct qti_hap_chip *chip;
+ struct input_dev *input_dev;
+ struct ff_device *ff;
+ int rc = 0, effect_count_max;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ chip->pdev = pdev;
+ chip->dev = &pdev->dev;
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ dev_err(chip->dev, "Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = qti_haptics_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "parse device-tree failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ spin_lock_init(&chip->bus_lock);
+
+ rc = qti_haptics_hw_init(chip);
+ if (rc < 0) {
+		dev_err(chip->dev, "HW initialization failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = devm_request_threaded_irq(chip->dev, chip->play_irq, NULL,
+ qti_haptics_play_irq_handler,
+ IRQF_ONESHOT, "hap_play_irq", chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "request play-irq failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ disable_irq(chip->play_irq);
+ chip->play_irq_en = false;
+
+ rc = devm_request_threaded_irq(chip->dev, chip->sc_irq, NULL,
+ qti_haptics_sc_irq_handler,
+ IRQF_ONESHOT, "hap_sc_irq", chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "request sc-irq failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ chip->stop_timer.function = qti_hap_stop_timer;
+ hrtimer_init(&chip->hap_disable_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ chip->hap_disable_timer.function = qti_hap_disable_timer;
+ input_dev->name = "qti-haptics";
+ input_set_drvdata(input_dev, chip);
+ chip->input_dev = input_dev;
+
+ input_set_capability(input_dev, EV_FF, FF_CONSTANT);
+ input_set_capability(input_dev, EV_FF, FF_GAIN);
+ if (chip->effects_count != 0) {
+ input_set_capability(input_dev, EV_FF, FF_PERIODIC);
+ input_set_capability(input_dev, EV_FF, FF_CUSTOM);
+ }
+
+ if (chip->effects_count + 1 > FF_EFFECT_COUNT_MAX)
+ effect_count_max = chip->effects_count + 1;
+ else
+ effect_count_max = FF_EFFECT_COUNT_MAX;
+ rc = input_ff_create(input_dev, effect_count_max);
+ if (rc < 0) {
+ dev_err(chip->dev, "create FF input device failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ff = input_dev->ff;
+ ff->upload = qti_haptics_upload_effect;
+ ff->playback = qti_haptics_playback;
+ ff->erase = qti_haptics_erase;
+ ff->set_gain = qti_haptics_set_gain;
+
+ rc = input_register_device(input_dev);
+ if (rc < 0) {
+ dev_err(chip->dev, "register input device failed, rc=%d\n",
+ rc);
+ goto destroy_ff;
+ }
+
+ dev_set_drvdata(chip->dev, chip);
+#ifdef CONFIG_DEBUG_FS
+ rc = qti_haptics_add_debugfs(chip);
+ if (rc < 0)
+ dev_dbg(chip->dev, "create debugfs failed, rc=%d\n", rc);
+#endif
+ return 0;
+
+destroy_ff:
+ input_ff_destroy(chip->input_dev);
+ return rc;
+}
+
+static int qti_haptics_remove(struct platform_device *pdev)
+{
+ struct qti_hap_chip *chip = dev_get_drvdata(&pdev->dev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(chip->hap_debugfs);
+#endif
+ input_ff_destroy(chip->input_dev);
+ dev_set_drvdata(chip->dev, NULL);
+
+ return 0;
+}
+
+static void qti_haptics_shutdown(struct platform_device *pdev)
+{
+ struct qti_hap_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ dev_dbg(chip->dev, "Shutdown!\n");
+
+ qti_haptics_module_en(chip, false);
+
+ if (chip->vdd_supply && chip->vdd_enabled) {
+ rc = regulator_disable(chip->vdd_supply);
+ if (rc < 0) {
+ dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n",
+ rc);
+ return;
+ }
+ chip->vdd_enabled = false;
+ }
+}
+
+static const struct of_device_id haptics_match_table[] = {
+ { .compatible = "qcom,haptics" },
+ { .compatible = "qcom,pm660-haptics" },
+ { .compatible = "qcom,pm8150b-haptics" },
+ {},
+};
+
+static struct platform_driver qti_haptics_driver = {
+ .driver = {
+ .name = "qcom,haptics",
+ .of_match_table = haptics_match_table,
+ },
+ .probe = qti_haptics_probe,
+ .remove = qti_haptics_remove,
+ .shutdown = qti_haptics_shutdown,
+};
+module_platform_driver(qti_haptics_driver);
+
+MODULE_DESCRIPTION("QTI haptics driver");
+MODULE_LICENSE("GPL v2");
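
The probe path above exposes the haptics block as a standard force-feedback
input device ("qti-haptics") with FF_CONSTANT/FF_PERIODIC capabilities and the
upload/playback/erase/set_gain hooks wired to the driver. As a hedged
illustration (not part of the patch), a userspace client could drive it through
the generic FF ioctls roughly as follows; the /dev/input/event0 path, effect
strength and durations are assumptions:

/* Minimal userspace sketch: upload and play one FF_CONSTANT effect. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);	/* assumed event node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_CONSTANT;		/* handled via chip->constant */
	effect.id = -1;				/* let the kernel pick an id */
	effect.u.constant.level = 0x4000;	/* ~50% strength */
	effect.replay.length = 100;		/* play for 100 ms */
	if (ioctl(fd, EVIOCSFF, &effect) < 0)	/* -> qti_haptics_upload_effect */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start -> qti_haptics_playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	usleep(200000);
	ioctl(fd, EVIOCRMFF, effect.id);	/* -> qti_haptics_erase */
	close(fd);
	return 0;
}
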
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 81f93f2..87e8325 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -756,6 +756,15 @@
To compile this driver as a module, choose M here: the module
will be called leds-nic78bx.
+config LEDS_QTI_TRI_LED
+ tristate "LED support for Qualcomm Technologies, Inc. TRI_LED"
+ depends on LEDS_CLASS && MFD_SPMI_PMIC && PWM && OF
+ help
+	  This driver supports the TRI_LED module found in Qualcomm
+	  Technologies, Inc. PMIC chips. TRI_LED supports up to three LED
+	  drivers, each controlled by a PWM channel that is used for
+	  dimming or blinking.
+
config LEDS_QPNP_FLASH_V2
tristate "Support for QPNP V2 Flash LEDs"
depends on LEDS_CLASS && MFD_SPMI_PMIC
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 38b54a8..aab4e4d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -78,6 +78,7 @@
obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_QTI_TRI_LED) += leds-qti-tri-led.o
obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
# LED SPI Drivers
diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c
new file mode 100644
index 0000000..bcecc8d
--- /dev/null
+++ b/drivers/leds/leds-qti-tri-led.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define TRILED_REG_TYPE 0x04
+#define TRILED_REG_SUBTYPE 0x05
+#define TRILED_REG_EN_CTL 0x46
+
+/* TRILED_REG_EN_CTL */
+#define TRILED_EN_CTL_MASK GENMASK(7, 5)
+#define TRILED_EN_CTL_MAX_BIT 7
+
+#define TRILED_TYPE 0x19
+#define TRILED_SUBTYPE_LED3H0L12 0x02
+#define TRILED_SUBTYPE_LED2H0L12 0x03
+#define TRILED_SUBTYPE_LED1H2L12 0x04
+
+#define TRILED_NUM_MAX 3
+
+#define PWM_PERIOD_DEFAULT_NS 1000000
+
+struct pwm_setting {
+ u64 pre_period_ns;
+ u64 period_ns;
+ u64 duty_ns;
+};
+
+struct led_setting {
+ u64 on_ms;
+ u64 off_ms;
+ enum led_brightness brightness;
+ bool blink;
+ bool breath;
+};
+
+struct qpnp_led_dev {
+ struct led_classdev cdev;
+ struct pwm_device *pwm_dev;
+ struct pwm_setting pwm_setting;
+ struct led_setting led_setting;
+ struct qpnp_tri_led_chip *chip;
+ struct mutex lock;
+ const char *label;
+ const char *default_trigger;
+ u8 id;
+ bool blinking;
+ bool breathing;
+};
+
+struct qpnp_tri_led_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ struct qpnp_led_dev *leds;
+ struct nvmem_device *pbs_nvmem;
+ struct mutex bus_lock;
+ int num_leds;
+ u16 reg_base;
+ u8 subtype;
+ u8 bitmap;
+};
+
+static int qpnp_tri_led_read(struct qpnp_tri_led_chip *chip, u16 addr, u8 *val)
+{
+ int rc;
+ unsigned int tmp;
+
+ mutex_lock(&chip->bus_lock);
+ rc = regmap_read(chip->regmap, chip->reg_base + addr, &tmp);
+ if (rc < 0)
+ dev_err(chip->dev, "Read addr 0x%x failed, rc=%d\n", addr, rc);
+ else
+ *val = (u8)tmp;
+ mutex_unlock(&chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_tri_led_masked_write(struct qpnp_tri_led_chip *chip,
+ u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ mutex_lock(&chip->bus_lock);
+ rc = regmap_update_bits(chip->regmap, chip->reg_base + addr, mask, val);
+ if (rc < 0)
+ dev_err(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ addr, val, mask, rc);
+ mutex_unlock(&chip->bus_lock);
+
+ return rc;
+}
+
+static int __tri_led_config_pwm(struct qpnp_led_dev *led,
+ struct pwm_setting *pwm)
+{
+ struct pwm_state pstate;
+ int rc;
+
+ pwm_get_state(led->pwm_dev, &pstate);
+ pstate.enabled = !!(pwm->duty_ns != 0);
+ pstate.period = pwm->period_ns;
+ pstate.duty_cycle = pwm->duty_ns;
+ pstate.output_type = led->led_setting.breath ?
+ PWM_OUTPUT_MODULATED : PWM_OUTPUT_FIXED;
+ /* Use default pattern in PWM device */
+ pstate.output_pattern = NULL;
+ rc = pwm_apply_state(led->pwm_dev, &pstate);
+
+ if (rc < 0)
+ dev_err(led->chip->dev, "Apply PWM state for %s led failed, rc=%d\n",
+ led->cdev.name, rc);
+
+ return rc;
+}
+
+#define PBS_ENABLE 1
+#define PBS_DISABLE 2
+#define PBS_ARG 0x42
+#define PBS_TRIG_CLR 0xE6
+#define PBS_TRIG_SET 0xE5
+static int __tri_led_set(struct qpnp_led_dev *led)
+{
+ int rc = 0;
+ u8 val = 0, mask = 0, pbs_val;
+ u8 prev_bitmap;
+
+ rc = __tri_led_config_pwm(led, &led->pwm_setting);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "Configure PWM for %s led failed, rc=%d\n",
+ led->cdev.name, rc);
+ return rc;
+ }
+
+ mask |= 1 << (TRILED_EN_CTL_MAX_BIT - led->id);
+
+ if (led->pwm_setting.duty_ns == 0)
+ val = 0;
+ else
+ val = mask;
+
+ if (led->chip->subtype == TRILED_SUBTYPE_LED2H0L12 &&
+ led->chip->pbs_nvmem) {
+ /*
+ * Control BOB_CONFIG_EXT_CTRL2_FORCE_EN for HR_LED through
+		 * PBS trigger. The enable trigger fires if any one of the
+		 * LEDs is turned on; the disable trigger fires only when
+		 * both LEDs are turned off.
+ */
+
+ prev_bitmap = led->chip->bitmap;
+ if (val)
+ led->chip->bitmap |= (1 << led->id);
+ else
+ led->chip->bitmap &= ~(1 << led->id);
+
+ if (!(led->chip->bitmap & prev_bitmap)) {
+ pbs_val = led->chip->bitmap ? PBS_ENABLE : PBS_DISABLE;
+ rc = nvmem_device_write(led->chip->pbs_nvmem, PBS_ARG,
+ 1, &pbs_val);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "Couldn't set PBS_ARG, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ pbs_val = 1;
+ rc = nvmem_device_write(led->chip->pbs_nvmem,
+ PBS_TRIG_CLR, 1, &pbs_val);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "Couldn't set PBS_TRIG_CLR, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ pbs_val = 1;
+ rc = nvmem_device_write(led->chip->pbs_nvmem,
+ PBS_TRIG_SET, 1, &pbs_val);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "Couldn't set PBS_TRIG_SET, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ rc = qpnp_tri_led_masked_write(led->chip, TRILED_REG_EN_CTL,
+ mask, val);
+ if (rc < 0)
+ dev_err(led->chip->dev, "Update addr 0x%x failed, rc=%d\n",
+ TRILED_REG_EN_CTL, rc);
+
+ return rc;
+}
+
+static int qpnp_tri_led_set(struct qpnp_led_dev *led)
+{
+ u64 on_ms, off_ms, period_ns, duty_ns;
+ enum led_brightness brightness = led->led_setting.brightness;
+ int rc = 0;
+
+ if (led->led_setting.blink) {
+ on_ms = led->led_setting.on_ms;
+ off_ms = led->led_setting.off_ms;
+
+ duty_ns = on_ms * NSEC_PER_MSEC;
+ period_ns = (on_ms + off_ms) * NSEC_PER_MSEC;
+
+ if (period_ns < duty_ns && duty_ns != 0)
+ period_ns = duty_ns + 1;
+ } else {
+ /* Use initial period if no blinking is required */
+ period_ns = led->pwm_setting.pre_period_ns;
+
+ if (brightness == LED_OFF)
+ duty_ns = 0;
+
+ duty_ns = period_ns * brightness;
+ do_div(duty_ns, LED_FULL);
+
+ if (period_ns < duty_ns && duty_ns != 0)
+ period_ns = duty_ns + 1;
+ }
+ dev_dbg(led->chip->dev, "PWM settings for %s led: period = %lluns, duty = %lluns\n",
+ led->cdev.name, period_ns, duty_ns);
+
+ led->pwm_setting.duty_ns = duty_ns;
+ led->pwm_setting.period_ns = period_ns;
+
+ rc = __tri_led_set(led);
+ if (rc < 0) {
+ dev_err(led->chip->dev, "__tri_led_set %s failed, rc=%d\n",
+ led->cdev.name, rc);
+ return rc;
+ }
+
+ if (led->led_setting.blink) {
+ led->cdev.brightness = LED_FULL;
+ led->blinking = true;
+ led->breathing = false;
+ } else if (led->led_setting.breath) {
+ led->cdev.brightness = LED_FULL;
+ led->blinking = false;
+ led->breathing = true;
+ } else {
+ led->cdev.brightness = led->led_setting.brightness;
+ led->blinking = false;
+ led->breathing = false;
+ }
+
+ return rc;
+}
+
+static int qpnp_tri_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+ int rc = 0;
+
+ mutex_lock(&led->lock);
+ if (brightness > LED_FULL)
+ brightness = LED_FULL;
+
+ if (brightness == led->led_setting.brightness &&
+ !led->blinking && !led->breathing) {
+ mutex_unlock(&led->lock);
+ return 0;
+ }
+
+ led->led_setting.brightness = brightness;
+ if (!!brightness)
+ led->led_setting.off_ms = 0;
+ else
+ led->led_setting.on_ms = 0;
+ led->led_setting.blink = false;
+ led->led_setting.breath = false;
+
+ rc = qpnp_tri_led_set(led);
+ if (rc)
+ dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+ led->label, rc);
+
+ mutex_unlock(&led->lock);
+
+ return rc;
+}
+
+static enum led_brightness qpnp_tri_led_get_brightness(
+ struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int qpnp_tri_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *on_ms, unsigned long *off_ms)
+{
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+ int rc = 0;
+
+ mutex_lock(&led->lock);
+ if (led->blinking && *on_ms == led->led_setting.on_ms &&
+ *off_ms == led->led_setting.off_ms) {
+ dev_dbg(led_cdev->dev, "Ignore, on/off setting is not changed: on %lums, off %lums\n",
+ *on_ms, *off_ms);
+ mutex_unlock(&led->lock);
+ return 0;
+ }
+
+ if (*on_ms == 0) {
+ led->led_setting.blink = false;
+ led->led_setting.breath = false;
+ led->led_setting.brightness = LED_OFF;
+ } else if (*off_ms == 0) {
+ led->led_setting.blink = false;
+ led->led_setting.breath = false;
+ led->led_setting.brightness = led->cdev.brightness;
+ } else {
+ led->led_setting.on_ms = *on_ms;
+ led->led_setting.off_ms = *off_ms;
+ led->led_setting.blink = true;
+ led->led_setting.breath = false;
+ }
+
+ rc = qpnp_tri_led_set(led);
+ if (rc)
+ dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+ led->label, rc);
+
+ mutex_unlock(&led->lock);
+ return rc;
+}
+
+static ssize_t breath_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", led->led_setting.breath);
+}
+
+static ssize_t breath_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ bool breath;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct qpnp_led_dev *led =
+ container_of(led_cdev, struct qpnp_led_dev, cdev);
+
+ rc = kstrtobool(buf, &breath);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&led->lock);
+ if (led->breathing == breath)
+ goto unlock;
+
+ led->led_setting.blink = false;
+ led->led_setting.breath = breath;
+ led->led_setting.brightness = breath ? LED_FULL : LED_OFF;
+ rc = qpnp_tri_led_set(led);
+ if (rc < 0)
+ dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+ led->label, rc);
+
+unlock:
+ mutex_unlock(&led->lock);
+ return (rc < 0) ? rc : count;
+}
+
+static DEVICE_ATTR_RW(breath);
+static const struct attribute *breath_attrs[] = {
+ &dev_attr_breath.attr,
+ NULL
+};
+
+static int qpnp_tri_led_register(struct qpnp_tri_led_chip *chip)
+{
+ struct qpnp_led_dev *led;
+ int rc, i, j;
+
+ for (i = 0; i < chip->num_leds; i++) {
+ led = &chip->leds[i];
+ mutex_init(&led->lock);
+ led->cdev.name = led->label;
+ led->cdev.max_brightness = LED_FULL;
+ led->cdev.brightness_set_blocking = qpnp_tri_led_set_brightness;
+ led->cdev.brightness_get = qpnp_tri_led_get_brightness;
+ led->cdev.blink_set = qpnp_tri_led_set_blink;
+ led->cdev.default_trigger = led->default_trigger;
+ led->cdev.brightness = LED_OFF;
+ led->cdev.flags |= LED_KEEP_TRIGGER;
+
+ rc = devm_led_classdev_register(chip->dev, &led->cdev);
+ if (rc < 0) {
+ dev_err(chip->dev, "%s led class device registering failed, rc=%d\n",
+ led->label, rc);
+ goto err_out;
+ }
+
+ if (pwm_get_output_type_supported(led->pwm_dev)
+ & PWM_OUTPUT_MODULATED) {
+ rc = sysfs_create_files(&led->cdev.dev->kobj,
+ breath_attrs);
+ if (rc < 0) {
+ dev_err(chip->dev, "Create breath file for %s led failed, rc=%d\n",
+ led->label, rc);
+ goto err_out;
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ for (j = 0; j <= i; j++) {
+ if (j < i)
+ sysfs_remove_files(&chip->leds[j].cdev.dev->kobj,
+ breath_attrs);
+ mutex_destroy(&chip->leds[j].lock);
+ }
+ return rc;
+}
+
+static int qpnp_tri_led_hw_init(struct qpnp_tri_led_chip *chip)
+{
+ int rc = 0;
+ u8 val;
+
+ rc = qpnp_tri_led_read(chip, TRILED_REG_TYPE, &val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Read REG_TYPE failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (val != TRILED_TYPE) {
+		dev_err(chip->dev, "invalid type(%d)\n", val);
+ return -ENODEV;
+ }
+
+ rc = qpnp_tri_led_read(chip, TRILED_REG_SUBTYPE, &val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Read REG_SUBTYPE failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->subtype = val;
+
+ return 0;
+}
+
+static int qpnp_tri_led_parse_dt(struct qpnp_tri_led_chip *chip)
+{
+ struct device_node *node = chip->dev->of_node, *child_node;
+ struct qpnp_led_dev *led;
+ struct pwm_args pargs;
+ const __be32 *addr;
+ int rc = 0, id, i = 0;
+
+ addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(chip->dev, "Getting address failed\n");
+ return -EINVAL;
+ }
+ chip->reg_base = be32_to_cpu(addr[0]);
+
+ chip->num_leds = of_get_available_child_count(node);
+ if (chip->num_leds == 0) {
+ dev_err(chip->dev, "No led child node defined\n");
+ return -ENODEV;
+ }
+
+ if (chip->num_leds > TRILED_NUM_MAX) {
+		dev_err(chip->dev, "can't support %d leds (max %d)\n",
+ chip->num_leds, TRILED_NUM_MAX);
+ return -EINVAL;
+ }
+
+ if (of_find_property(chip->dev->of_node, "nvmem", NULL)) {
+ chip->pbs_nvmem = devm_nvmem_device_get(chip->dev, "pbs_sdam");
+ if (IS_ERR_OR_NULL(chip->pbs_nvmem)) {
+ rc = PTR_ERR(chip->pbs_nvmem);
+ if (rc != -EPROBE_DEFER) {
+ dev_err(chip->dev, "Couldn't get nvmem device, rc=%d\n",
+ rc);
+ return -ENODEV;
+ }
+ chip->pbs_nvmem = NULL;
+ return rc;
+ }
+ }
+
+ chip->leds = devm_kcalloc(chip->dev, chip->num_leds,
+ sizeof(struct qpnp_led_dev), GFP_KERNEL);
+ if (!chip->leds)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(node, child_node) {
+ rc = of_property_read_u32(child_node, "led-sources", &id);
+ if (rc) {
+ dev_err(chip->dev, "Get led-sources failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (id >= TRILED_NUM_MAX) {
+ dev_err(chip->dev, "only support 0~%d current source\n",
+ TRILED_NUM_MAX - 1);
+ return -EINVAL;
+ }
+
+ led = &chip->leds[i++];
+ led->chip = chip;
+ led->id = id;
+ led->label =
+ of_get_property(child_node, "label", NULL) ? :
+ child_node->name;
+
+ led->pwm_dev =
+ devm_of_pwm_get(chip->dev, child_node, NULL);
+ if (IS_ERR(led->pwm_dev)) {
+ rc = PTR_ERR(led->pwm_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev, "Get pwm device for %s led failed, rc=%d\n",
+ led->label, rc);
+ return rc;
+ }
+
+ pwm_get_args(led->pwm_dev, &pargs);
+ if (pargs.period == 0)
+ led->pwm_setting.pre_period_ns = PWM_PERIOD_DEFAULT_NS;
+ else
+ led->pwm_setting.pre_period_ns = pargs.period;
+
+ led->default_trigger = of_get_property(child_node,
+ "linux,default-trigger", NULL);
+ }
+
+ return rc;
+}
+
+static int qpnp_tri_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_tri_led_chip *chip;
+ int rc = 0;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ dev_err(chip->dev, "Getting regmap failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_tri_led_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ mutex_init(&chip->bus_lock);
+
+ rc = qpnp_tri_led_hw_init(chip);
+ if (rc) {
+ dev_err(chip->dev, "HW initialization failed, rc=%d\n", rc);
+ goto destroy;
+ }
+
+ dev_set_drvdata(chip->dev, chip);
+ rc = qpnp_tri_led_register(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Registering LED class devices failed, rc=%d\n",
+ rc);
+ goto destroy;
+ }
+
+ dev_dbg(chip->dev, "Tri-led module with subtype 0x%x is detected\n",
+ chip->subtype);
+ return 0;
+destroy:
+ mutex_destroy(&chip->bus_lock);
+ dev_set_drvdata(chip->dev, NULL);
+
+ return rc;
+}
+
+static int qpnp_tri_led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct qpnp_tri_led_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&chip->bus_lock);
+ for (i = 0; i < chip->num_leds; i++) {
+ sysfs_remove_files(&chip->leds[i].cdev.dev->kobj, breath_attrs);
+ mutex_destroy(&chip->leds[i].lock);
+ }
+ dev_set_drvdata(chip->dev, NULL);
+ return 0;
+}
+
+static const struct of_device_id qpnp_tri_led_of_match[] = {
+ { .compatible = "qcom,tri-led",},
+ { },
+};
+
+static struct platform_driver qpnp_tri_led_driver = {
+ .driver = {
+ .name = "qcom,tri-led",
+ .of_match_table = qpnp_tri_led_of_match,
+ },
+ .probe = qpnp_tri_led_probe,
+ .remove = qpnp_tri_led_remove,
+};
+module_platform_driver(qpnp_tri_led_driver);
+
+MODULE_DESCRIPTION("QTI TRI_LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:qpnp-tri-led");
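
For reference, qpnp_tri_led_set() above converts the LED core's blink and
brightness requests into a PWM duty/period pair: for blinking, the duty is the
on time and the period is the on plus off time (both scaled to nanoseconds);
for a steady brightness it scales the preset period by brightness/LED_FULL.
The standalone sketch below reproduces that arithmetic with assumed example
values so it can be checked in isolation (it is not part of the driver):

/* Hedged sketch of the blink/brightness to PWM duty/period mapping. */
#include <stdio.h>

#define LED_FULL	255ULL		/* max brightness in the LED class core */
#define NSEC_PER_MSEC	1000000ULL

int main(void)
{
	/* blink request: 300 ms on, 700 ms off */
	unsigned long long on_ms = 300, off_ms = 700;
	unsigned long long duty_ns = on_ms * NSEC_PER_MSEC;
	unsigned long long period_ns = (on_ms + off_ms) * NSEC_PER_MSEC;

	printf("blink:  period=%lluns duty=%lluns\n", period_ns, duty_ns);

	/* steady brightness 128 with the 1 ms default period */
	unsigned long long brightness = 128;

	period_ns = 1000000;				/* PWM_PERIOD_DEFAULT_NS */
	duty_ns = period_ns * brightness / LED_FULL;	/* ~501960 ns */
	printf("steady: period=%lluns duty=%lluns\n", period_ns, duty_ns);
	return 0;
}
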
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 0a7a470e..b7fc88c 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -90,6 +90,14 @@
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+	  This driver supports the Shared Direct Access Memory Module
+	  (SDAM) on Qualcomm Technologies, Inc. PMICs. It provides clients
+	  with an interface to read and write the SDAM module's shared
+	  memory.
+
config ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 4e8c616..850cd54 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -23,6 +23,7 @@
nvmem_mtk-efuse-y := mtk-efuse.o
obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += qcom-spmi-sdam.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 0000000..b3f32d3
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START 0x40
+#define REGISTER_MAP_ID 0x40
+#define REGISTER_MAP_VERSION 0x41
+#define SDAM_SIZE 0x44
+#define SDAM_PBS_TRIG_SET 0xE5
+#define SDAM_PBS_TRIG_CLR 0xE6
+
+struct sdam_chip {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ int base;
+ int size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+ REGISTER_MAP_ID,
+ REGISTER_MAP_VERSION,
+ SDAM_SIZE
+};
+
+static bool is_valid(struct sdam_chip *sdam, unsigned int offset, size_t len)
+{
+ int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+ if (!len)
+ return false;
+
+ if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+ && (offset + len - 1) <= sdam_mem_end)
+ return true;
+ else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+ && (len == 1))
+ return true;
+
+ return false;
+}
+
+static bool is_ro(unsigned int offset, size_t len)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+ if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+ return true;
+
+ return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ int rc;
+
+ if (!is_valid(sdam, offset, bytes)) {
+ pr_err("Invalid SDAM offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ pr_err("Failed to read SDAM offset 0x%02x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ int rc;
+
+ if (!is_valid(sdam, offset, bytes)) {
+ pr_err("Invalid SDAM offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ if (is_ro(offset, bytes)) {
+ pr_err("Invalid write offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ pr_err("Failed to write SDAM offset 0x%02x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+ struct sdam_chip *sdam;
+ struct nvmem_device *nvmem;
+ struct nvmem_config *sdam_config;
+ unsigned int val = 0;
+ int rc;
+
+ sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+ if (!sdam)
+ return -ENOMEM;
+
+ sdam_config = devm_kzalloc(&pdev->dev, sizeof(*sdam_config),
+ GFP_KERNEL);
+ if (!sdam_config)
+ return -ENOMEM;
+
+ sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sdam->regmap) {
+ pr_err("Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+ if (rc < 0) {
+ pr_err("Failed to get SDAM base, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+ if (rc < 0) {
+ pr_err("Failed to read SDAM_SIZE rc=%d\n", rc);
+ return -EINVAL;
+ }
+ sdam->size = val * 32;
+
+ sdam_config->dev = &pdev->dev;
+ sdam_config->name = "spmi_sdam";
+ sdam_config->id = pdev->id;
+	sdam_config->owner = THIS_MODULE;
+ sdam_config->stride = 1;
+ sdam_config->word_size = 1;
+ sdam_config->reg_read = sdam_read;
+ sdam_config->reg_write = sdam_write;
+ sdam_config->priv = sdam;
+
+ nvmem = nvmem_register(sdam_config);
+ if (IS_ERR(nvmem)) {
+ pr_err("Failed to register SDAM nvmem device rc=%ld\n",
+ PTR_ERR(nvmem));
+ return -ENXIO;
+ }
+ platform_set_drvdata(pdev, nvmem);
+
+ pr_info("SDAM base=0x%04x size=%d registered successfully\n",
+ sdam->base, sdam->size);
+
+ return 0;
+}
+
+static int sdam_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id sdam_match_table[] = {
+ {.compatible = "qcom,spmi-sdam"},
+ {},
+};
+
+static struct platform_driver sdam_driver = {
+ .driver = {
+ .name = "qcom,spmi-sdam",
+ .of_match_table = sdam_match_table,
+ },
+ .probe = sdam_probe,
+ .remove = sdam_remove,
+};
+
+static int __init sdam_init(void)
+{
+ return platform_driver_register(&sdam_driver);
+}
+subsys_initcall(sdam_init);
+
+static void __exit sdam_exit(void)
+{
+ return platform_driver_unregister(&sdam_driver);
+}
+module_exit(sdam_exit);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
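
Clients consume this SDAM region through the regular nvmem consumer API, as
the tri-led driver above does with its "pbs_sdam" handle. The sketch below is
a hedged illustration of that usage from some other client driver; the handle
name must match an nvmem-names entry in the client's device tree node, and the
offset (0x45) is an assumption that merely falls inside the SDAM_MEM_START
window validated by is_valid():

/* Hedged sketch of an nvmem consumer accessing SDAM shared memory. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_sdam_access(struct device *dev)
{
	struct nvmem_device *sdam;
	u8 val = 0x1;
	int rc;

	sdam = devm_nvmem_device_get(dev, "pbs_sdam");
	if (IS_ERR(sdam))
		return PTR_ERR(sdam);

	/* write one byte at an assumed offset inside the SDAM window */
	rc = nvmem_device_write(sdam, 0x45, 1, &val);
	if (rc < 0)
		return rc;

	/* read it back; sdam_read() validates the offset/length first */
	rc = nvmem_device_read(sdam, 0x45, 1, &val);
	return (rc < 0) ? rc : 0;
}
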
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 2b83a41..dca653d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -109,4 +109,9 @@
config OF_NUMA
bool
+config OF_BATTERYDATA
+ def_bool y
+ help
+	  Device tree accessors for battery profile data, such as battery
+	  characterization tables and battery id resistor values.
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af..2ae109c 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -14,5 +14,6 @@
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
obj-$(CONFIG_OF_NUMA) += of_numa.o
+obj-$(CONFIG_OF_BATTERYDATA) += of_batterydata.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
new file mode 100644
index 0000000..5fe2b00
--- /dev/null
+++ b/drivers/of/of_batterydata.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/batterydata-lib.h>
+#include <linux/power_supply.h>
+
+static int of_batterydata_read_lut(const struct device_node *np,
+ int max_cols, int max_rows, int *ncols, int *nrows,
+ int *col_legend_data, int *row_legend_data,
+ int *lut_data)
+{
+ struct property *prop;
+ const __be32 *data;
+ int cols, rows, size, i, j, *out_values;
+
+ prop = of_find_property(np, "qcom,lut-col-legend", NULL);
+ if (!prop) {
+ pr_err("%s: No col legend found\n", np->name);
+ return -EINVAL;
+ } else if (!prop->value) {
+		pr_err("%s: No col legend value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > max_cols * sizeof(int)) {
+ pr_err("%s: Too many columns\n", np->name);
+ return -EINVAL;
+ }
+
+ cols = prop->length/sizeof(int);
+ *ncols = cols;
+ data = prop->value;
+ for (i = 0; i < cols; i++)
+ *col_legend_data++ = be32_to_cpup(data++);
+
+ rows = 0;
+
+ prop = of_find_property(np, "qcom,lut-row-legend", NULL);
+ if (!prop || row_legend_data == NULL) {
+ /* single row lut */
+ rows = 1;
+ } else if (!prop->value) {
+ pr_err("%s: No row legend value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > max_rows * sizeof(int)) {
+ pr_err("%s: Too many rows\n", np->name);
+ return -EINVAL;
+ }
+
+ if (rows != 1) {
+ rows = prop->length/sizeof(int);
+ *nrows = rows;
+ data = prop->value;
+ for (i = 0; i < rows; i++)
+ *row_legend_data++ = be32_to_cpup(data++);
+ }
+
+ prop = of_find_property(np, "qcom,lut-data", NULL);
+ if (!prop) {
+ pr_err("prop 'qcom,lut-data' not found\n");
+ return -EINVAL;
+ }
+ data = prop->value;
+ size = prop->length/sizeof(int);
+ if (size != cols * rows) {
+ pr_err("%s: data size mismatch, %dx%d != %d\n",
+ np->name, cols, rows, size);
+ return -EINVAL;
+ }
+ for (i = 0; i < rows; i++) {
+ out_values = lut_data + (max_cols * i);
+ for (j = 0; j < cols; j++) {
+ *out_values++ = be32_to_cpup(data++);
+ pr_debug("Value = %d\n", *(out_values-1));
+ }
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_sf_lut(struct device_node *data_node,
+ const char *name, struct sf_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+
+ rc = of_batterydata_read_lut(node, PC_CC_COLS, PC_CC_ROWS,
+ &lut->cols, &lut->rows, lut->row_entries,
+ lut->percent, *lut->sf);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_pc_temp_ocv_lut(struct device_node *data_node,
+ const char *name, struct pc_temp_ocv_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+ rc = of_batterydata_read_lut(node, PC_TEMP_COLS, PC_TEMP_ROWS,
+ &lut->cols, &lut->rows, lut->temp, lut->percent,
+ *lut->ocv);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_ibat_temp_acc_lut(struct device_node *data_node,
+ const char *name, struct ibat_temp_acc_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_debug("Couldn't find %s node.\n", name);
+ return 0;
+ }
+ rc = of_batterydata_read_lut(node, ACC_TEMP_COLS, ACC_IBAT_ROWS,
+ &lut->cols, &lut->rows, lut->temp, lut->ibat,
+ *lut->acc);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_single_row_lut(struct device_node *data_node,
+ const char *name, struct single_row_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+
+ rc = of_batterydata_read_lut(node, MAX_SINGLE_LUT_COLS, 1,
+ &lut->cols, NULL, lut->x, NULL, lut->y);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_batt_id_kohm(const struct device_node *np,
+ const char *propname, struct batt_ids *batt_ids)
+{
+ struct property *prop;
+ const __be32 *data;
+ int num, i, *id_kohm = batt_ids->kohm;
+
+ prop = of_find_property(np, "qcom,batt-id-kohm", NULL);
+ if (!prop) {
+ pr_err("%s: No battery id resistor found\n", np->name);
+ return -EINVAL;
+ } else if (!prop->value) {
+		pr_err("%s: No battery id resistor value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > MAX_BATT_ID_NUM * sizeof(__be32)) {
+ pr_err("%s: Too many battery id resistors\n", np->name);
+ return -EINVAL;
+ }
+
+ num = prop->length/sizeof(__be32);
+ batt_ids->num = num;
+ data = prop->value;
+ for (i = 0; i < num; i++)
+ *id_kohm++ = be32_to_cpup(data++);
+
+ return 0;
+}
+
+#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional) \
+do { \
+ if (rc) \
+ break; \
+ rc = of_property_read_u32(node, "qcom," qpnp_dt_property, \
+ &property); \
+ \
+ if ((rc == -EINVAL) && optional) { \
+ property = -EINVAL; \
+ rc = 0; \
+ } else if (rc) { \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+ } \
+} while (0)
+
+static int of_batterydata_load_battery_data(struct device_node *node,
+ int best_id_kohm,
+ struct bms_battery_data *batt_data)
+{
+ int rc;
+
+ rc = of_batterydata_read_single_row_lut(node, "qcom,fcc-temp-lut",
+ batt_data->fcc_temp_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_pc_temp_ocv_lut(node,
+ "qcom,pc-temp-ocv-lut",
+ batt_data->pc_temp_ocv_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_sf_lut(node, "qcom,rbatt-sf-lut",
+ batt_data->rbatt_sf_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_ibat_temp_acc_lut(node, "qcom,ibat-acc-lut",
+ batt_data->ibat_acc_lut);
+ if (rc)
+ return rc;
+
+ rc = of_property_read_string(node, "qcom,battery-type",
+ &batt_data->battery_type);
+ if (rc) {
+ pr_err("Error reading qcom,battery-type property rc=%d\n", rc);
+ batt_data->battery_type = NULL;
+ return rc;
+ }
+
+ OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+ OF_PROP_READ(batt_data->default_rbatt_mohm,
+ "default-rbatt-mohm", node, rc, false);
+ OF_PROP_READ(batt_data->rbatt_capacitive_mohm,
+ "rbatt-capacitive-mohm", node, rc, false);
+ OF_PROP_READ(batt_data->flat_ocv_threshold_uv,
+ "flat-ocv-threshold-uv", node, rc, true);
+ OF_PROP_READ(batt_data->max_voltage_uv,
+ "max-voltage-uv", node, rc, true);
+ OF_PROP_READ(batt_data->cutoff_uv, "v-cutoff-uv", node, rc, true);
+ OF_PROP_READ(batt_data->iterm_ua, "chg-term-ua", node, rc, true);
+ OF_PROP_READ(batt_data->fastchg_current_ma,
+ "fastchg-current-ma", node, rc, true);
+ OF_PROP_READ(batt_data->fg_cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", node, rc, true);
+
+ batt_data->batt_id_kohm = best_id_kohm;
+
+ return rc;
+}
+
+static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
+ int rpull_up, int vadc_vdd)
+{
+ int64_t resistor_value_kohm, denom;
+
+ if (batt_id_uv == 0) {
+ /* vadc not correct or batt id line grounded, report 0 kohms */
+ return 0;
+ }
+ /* calculate the battery id resistance reported via ADC */
+ denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+
+ if (denom == 0) {
+ /* batt id connector might be open, return 0 kohms */
+ return 0;
+ }
+ resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
+
+ pr_debug("batt id voltage = %d, resistor value = %lld\n",
+ batt_id_uv, resistor_value_kohm);
+
+ return resistor_value_kohm;
+}
+
+struct device_node *of_batterydata_get_best_profile(
+ const struct device_node *batterydata_container_node,
+ int batt_id_kohm, const char *batt_type)
+{
+ struct batt_ids batt_ids;
+ struct device_node *node, *best_node = NULL;
+ const char *battery_type = NULL;
+ int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
+ i = 0, rc = 0, limit = 0;
+ bool in_range = false;
+
+ /* read battery id range percentage for best profile */
+ rc = of_property_read_u32(batterydata_container_node,
+ "qcom,batt-id-range-pct", &id_range_pct);
+
+ if (rc) {
+ if (rc == -EINVAL) {
+ id_range_pct = 0;
+ } else {
+ pr_err("failed to read battery id range\n");
+ return ERR_PTR(-ENXIO);
+ }
+ }
+
+ /*
+ * Find the battery data with a battery id resistor closest to this one
+ */
+ for_each_child_of_node(batterydata_container_node, node) {
+ if (batt_type != NULL) {
+ rc = of_property_read_string(node, "qcom,battery-type",
+ &battery_type);
+ if (!rc && strcmp(battery_type, batt_type) == 0) {
+ best_node = node;
+ best_id_kohm = batt_id_kohm;
+ break;
+ }
+ } else {
+ rc = of_batterydata_read_batt_id_kohm(node,
+ "qcom,batt-id-kohm",
+ &batt_ids);
+ if (rc)
+ continue;
+ for (i = 0; i < batt_ids.num; i++) {
+ delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+ limit = (batt_ids.kohm[i] * id_range_pct) / 100;
+ in_range = (delta <= limit);
+ /*
+ * Check if the delta is the lowest one
+ * and also if the limits are in range
+ * before selecting the best node.
+ */
+ if ((delta < best_delta || !best_node)
+ && in_range) {
+ best_node = node;
+ best_delta = delta;
+ best_id_kohm = batt_ids.kohm[i];
+ }
+ }
+ }
+ }
+
+ if (best_node == NULL) {
+ pr_err("No battery data found\n");
+ return best_node;
+ }
+
+ /* check that profile id is in range of the measured batt_id */
+ if (abs(best_id_kohm - batt_id_kohm) >
+ ((best_id_kohm * id_range_pct) / 100)) {
+ pr_err("out of range: profile id %d batt id %d pct %d\n",
+ best_id_kohm, batt_id_kohm, id_range_pct);
+ return NULL;
+ }
+
+ rc = of_property_read_string(best_node, "qcom,battery-type",
+ &battery_type);
+ if (!rc)
+ pr_info("%s found\n", battery_type);
+ else
+ pr_info("%s found\n", best_node->name);
+
+ return best_node;
+}
+
+int of_batterydata_read_data(struct device_node *batterydata_container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv)
+{
+ struct device_node *node, *best_node;
+ struct batt_ids batt_ids;
+ const char *battery_type = NULL;
+ int delta, best_delta, batt_id_kohm, rpull_up_kohm,
+ vadc_vdd_uv, best_id_kohm, i, rc = 0;
+
+ node = batterydata_container_node;
+ OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
+ OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+ if (rc)
+ return rc;
+
+ batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
+ rpull_up_kohm, vadc_vdd_uv);
+ best_node = NULL;
+ best_delta = 0;
+ best_id_kohm = 0;
+
+ /*
+ * Find the battery data with a battery id resistor closest to this one
+ */
+ for_each_child_of_node(batterydata_container_node, node) {
+ rc = of_batterydata_read_batt_id_kohm(node,
+ "qcom,batt-id-kohm",
+ &batt_ids);
+ if (rc)
+ continue;
+ for (i = 0; i < batt_ids.num; i++) {
+ delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+ if (delta < best_delta || !best_node) {
+ best_node = node;
+ best_delta = delta;
+ best_id_kohm = batt_ids.kohm[i];
+ }
+ }
+ }
+
+ if (best_node == NULL) {
+ pr_err("No battery data found\n");
+ return -ENODATA;
+ }
+ rc = of_property_read_string(best_node, "qcom,battery-type",
+ &battery_type);
+ if (!rc)
+ pr_info("%s loaded\n", battery_type);
+ else
+ pr_info("%s loaded\n", best_node->name);
+
+ return of_batterydata_load_battery_data(best_node,
+ best_id_kohm, batt_data);
+}
+
+MODULE_LICENSE("GPL v2");
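
The id conversion in of_batterydata_convert_battery_id_kohm() is just the
pull-up divider solved for the battery id resistor, with rounding to the
nearest kohm. A hedged, standalone check of the arithmetic with assumed board
values (100 kohm pull-up, 1.8 V ADC reference, 0.6 V measured on BATT_ID)
gives the expected 50 kohm:

/* Hedged numeric check of the battery id divider math. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t rpull_up_kohm = 100;		/* qcom,rpull-up-kohm (assumed) */
	int64_t vadc_vdd_uv = 1800000;		/* qcom,vref-batt-therm (assumed) */
	int64_t batt_id_uv = 600000;		/* measured BATT_ID voltage */

	/* denom = (Vdd / Vid) * 1e6 - 1e6, as in the driver */
	int64_t denom = vadc_vdd_uv * 1000000LL / batt_id_uv - 1000000LL;
	/* R = Rpull * 1e6 / denom, rounded to the nearest kohm */
	int64_t r_kohm = (rpull_up_kohm * 1000000LL + denom / 2) / denom;

	printf("battery id resistor = %lld kohm\n", (long long)r_kohm);	/* 50 */
	return 0;
}
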
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 028b287..fe32ece 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -278,5 +278,30 @@
To compile this driver as a module, choose M here: the
module will be called vmd.
+config PCI_MSM
+ bool "MSM PCIe Controller driver"
+ depends on ARCH_QCOM && PCI
+ select PCI_DOMAINS
+ select PCI_DOMAINS_GENERIC
+ select PCI_MSI
+ select CRC8
+ help
+	  Enables PCIe functionality by configuring the PCIe core on
+	  MSM chipsets and by enabling the ARM PCI framework extension.
+ The PCIe core is essential for communication between the host
+ and an endpoint.
+
+ If unsure, say N.
+
+config PCI_MSM_MSI
+ bool "MSM PCIe MSI support"
+ depends on PCI_MSM
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+	  Say Y here if you want to enable MSI support for the PCIe
+	  controller and its devices. This controller provides
+	  message-signaled interrupt (MSI) allocation, routing,
+	  masking, and affinity assignment for PCIe devices.
+
source "drivers/pci/controller/dwc/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index d56a507..200ddcb 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -29,6 +29,8 @@
obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCI_MSM) += pci-msm.o
+obj-$(CONFIG_PCI_MSM_MSI) += pci-msm-msi.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
diff --git a/drivers/pci/controller/pci-msm-msi.c b/drivers/pci/controller/pci-msm-msi.c
new file mode 100644
index 0000000..a8c04cb
--- /dev/null
+++ b/drivers/pci/controller/pci-msm-msi.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/ipc_logging.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+
+struct msm_msi_irq {
+ unsigned int hwirq; /* MSI controller hwirq */
+ unsigned int virq; /* MSI controller virq */
+};
+
+struct msm_msi {
+ struct list_head clients;
+ struct device *dev;
+ struct device_node *of_node;
+ int nr_irqs;
+ struct msm_msi_irq *irqs;
+ unsigned long *bitmap; /* tracks used/unused MSI */
+ struct mutex mutex; /* mutex for modifying MSI client list and bitmap */
+ struct irq_domain *inner_domain; /* parent domain; gen irq related */
+ struct irq_domain *msi_domain; /* child domain; pci related */
+ phys_addr_t msi_addr;
+};
+
+/* structure for each client of MSI controller */
+struct msm_msi_client {
+ struct list_head node;
+ struct msm_msi *msi;
+	struct device *dev; /* client device, i.e. the pci_dev's dev */
+ u32 nr_irqs; /* nr_irqs allocated for client */
+ dma_addr_t msi_addr;
+};
+
+static void msm_msi_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct msm_msi *msi;
+ unsigned int virq;
+
+ chained_irq_enter(chip, desc);
+
+ msi = irq_desc_get_handler_data(desc);
+ virq = irq_find_mapping(msi->inner_domain, irq_desc_get_irq(desc));
+
+ generic_handle_irq(virq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip msm_msi_irq_chip = {
+ .name = "msm_pci_msi",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int msm_msi_domain_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *arg)
+{
+ struct msm_msi *msi = domain->parent->host_data;
+ struct msm_msi_client *client;
+
+ client = devm_kzalloc(msi->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->msi = msi;
+ client->dev = dev;
+ client->msi_addr = dma_map_resource(client->dev, msi->msi_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(client->dev, client->msi_addr)) {
+ dev_err(msi->dev, "MSI: failed to map msi address\n");
+ client->msi_addr = 0;
+ return -ENOMEM;
+ }
+
+ mutex_lock(&msi->mutex);
+ list_add_tail(&client->node, &msi->clients);
+ mutex_unlock(&msi->mutex);
+
+ /* zero out struct for framework */
+ memset(arg, 0, sizeof(*arg));
+
+ return 0;
+}
+
+static void msm_msi_domain_finish(msi_alloc_info_t *arg, int retval)
+{
+ struct device *dev = arg->desc->dev;
+ struct irq_domain *domain = dev_get_msi_domain(dev);
+ struct msm_msi *msi = domain->parent->host_data;
+
+ /* if prepare or alloc fails, then clean up */
+ if (retval) {
+ struct msm_msi_client *tmp, *client = NULL;
+
+ mutex_lock(&msi->mutex);
+ list_for_each_entry(tmp, &msi->clients, node) {
+ if (tmp->dev == dev) {
+ client = tmp;
+ list_del(&client->node);
+ break;
+ }
+ }
+ mutex_unlock(&msi->mutex);
+
+ if (!client)
+ return;
+
+ if (client->msi_addr)
+ dma_unmap_resource(client->dev, client->msi_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE, 0);
+
+ devm_kfree(msi->dev, client);
+
+ return;
+ }
+}
+
+static struct msi_domain_ops msm_msi_domain_ops = {
+ .msi_prepare = msm_msi_domain_prepare,
+ .msi_finish = msm_msi_domain_finish,
+};
+
+static struct msi_domain_info msm_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .ops = &msm_msi_domain_ops,
+ .chip = &msm_msi_irq_chip,
+};
+
+static int msm_msi_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent_data = irq_get_irq_data(irqd_to_hwirq(data));
+
+ /* set affinity for MSM MSI HW IRQ */
+ if (parent_data->chip->irq_set_affinity)
+ return parent_data->chip->irq_set_affinity(parent_data,
+ mask, force);
+
+ return -EINVAL;
+}
+
+static void msm_msi_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct msm_msi_client *client = irq_data_get_irq_chip_data(data);
+ struct irq_data *parent_data = irq_get_irq_data(irqd_to_hwirq(data));
+
+ msg->address_lo = lower_32_bits(client->msi_addr);
+ msg->address_hi = upper_32_bits(client->msi_addr);
+
+ /* GIC HW IRQ */
+ msg->data = irqd_to_hwirq(parent_data);
+}
+
+static struct irq_chip msm_msi_bottom_irq_chip = {
+ .name = "msm_msi",
+ .irq_set_affinity = msm_msi_irq_set_affinity,
+ .irq_compose_msi_msg = msm_msi_irq_compose_msi_msg,
+};
+
+static int msm_msi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct msm_msi *msi = domain->host_data;
+ struct msm_msi_client *tmp, *client = NULL;
+ struct device *dev = ((msi_alloc_info_t *)args)->desc->dev;
+ int i, ret = 0;
+ int pos;
+
+ mutex_lock(&msi->mutex);
+ list_for_each_entry(tmp, &msi->clients, node) {
+ if (tmp->dev == dev) {
+ client = tmp;
+ break;
+ }
+ }
+
+ if (!client) {
+ dev_err(msi->dev, "MSI: failed to find client dev\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pos = bitmap_find_next_zero_area(msi->bitmap, msi->nr_irqs, 0,
+ nr_irqs, 0);
+ if (pos < msi->nr_irqs) {
+ bitmap_set(msi->bitmap, pos, nr_irqs);
+ } else {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ msi->irqs[pos].virq = virq + i;
+ irq_domain_set_info(domain, msi->irqs[pos].virq,
+ msi->irqs[pos].hwirq,
+ &msm_msi_bottom_irq_chip, client,
+ handle_simple_irq, NULL, NULL);
+ client->nr_irqs++;
+ pos++;
+ }
+
+out:
+ mutex_unlock(&msi->mutex);
+ return ret;
+}
+
+static void msm_msi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct msm_msi_client *client = irq_data_get_irq_chip_data(data);
+ struct msm_msi *msi = client->msi;
+ int i;
+
+ mutex_lock(&msi->mutex);
+ for (i = 0; i < nr_irqs; i++)
+	for (i = 0; i < msi->nr_irqs; i++)
+ break;
+
+ bitmap_clear(msi->bitmap, i, nr_irqs);
+ client->nr_irqs -= nr_irqs;
+
+ if (!client->nr_irqs) {
+ dma_unmap_resource(client->dev, client->msi_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, 0);
+ list_del(&client->node);
+ devm_kfree(msi->dev, client);
+ }
+
+ mutex_unlock(&msi->mutex);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = msm_msi_irq_domain_alloc,
+ .free = msm_msi_irq_domain_free,
+};
+
+static int msm_msi_alloc_domains(struct msm_msi *msi)
+{
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_irqs,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(msi->dev, "MSI: failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(
+ of_node_to_fwnode(msi->of_node),
+ &msm_msi_domain_info,
+ msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(msi->dev, "MSI: failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int msm_msi_init(struct device *dev)
+{
+ int i, ret;
+ struct msm_msi *msi;
+ struct device_node *of_node;
+ const __be32 *prop_val;
+
+ if (!dev->of_node) {
+ dev_err(dev, "MSI: missing DT node\n");
+ return -EINVAL;
+ }
+
+ of_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
+ if (!of_node) {
+ dev_err(dev, "MSI: no phandle for MSI found\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_compatible(of_node, "qcom,pci-msi")) {
+ dev_err(dev, "MSI: no compatible qcom,pci-msi found\n");
+ return -ENODEV;
+ }
+
+ if (!of_find_property(of_node, "msi-controller", NULL))
+ return -ENODEV;
+
+ msi = devm_kzalloc(dev, sizeof(*msi), GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ msi->dev = dev;
+ msi->of_node = of_node;
+ mutex_init(&msi->mutex);
+ INIT_LIST_HEAD(&msi->clients);
+
+ prop_val = of_get_address(msi->of_node, 0, NULL, NULL);
+ if (!prop_val) {
+		dev_err(msi->dev, "MSI: missing 'reg' devicetree property\n");
+ return -EINVAL;
+ }
+
+ msi->msi_addr = be32_to_cpup(prop_val);
+ if (!msi->msi_addr) {
+ dev_err(msi->dev, "MSI: failed to get MSI address\n");
+ return -EINVAL;
+ }
+
+ msi->nr_irqs = of_irq_count(msi->of_node);
+ if (!msi->nr_irqs) {
+ dev_err(msi->dev, "MSI: found no MSI interrupts\n");
+ return -ENODEV;
+ }
+
+ msi->irqs = devm_kcalloc(msi->dev, msi->nr_irqs,
+ sizeof(*msi->irqs), GFP_KERNEL);
+ if (!msi->irqs)
+ return -ENOMEM;
+
+ msi->bitmap = devm_kcalloc(msi->dev, BITS_TO_LONGS(msi->nr_irqs),
+ sizeof(*msi->bitmap), GFP_KERNEL);
+ if (!msi->bitmap)
+ return -ENOMEM;
+
+ ret = msm_msi_alloc_domains(msi);
+ if (ret) {
+ dev_err(msi->dev, "MSI: failed to allocate MSI domains\n");
+ return ret;
+ }
+
+ for (i = 0; i < msi->nr_irqs; i++) {
+ unsigned int irq = irq_of_parse_and_map(msi->of_node, i);
+
+ if (!irq) {
+ dev_err(msi->dev,
+ "MSI: failed to parse/map interrupt\n");
+ ret = -ENODEV;
+ goto free_irqs;
+ }
+
+ msi->irqs[i].hwirq = irq;
+ irq_set_chained_handler_and_data(msi->irqs[i].hwirq,
+ msm_msi_handler, msi);
+ }
+
+ return 0;
+
+free_irqs:
+ for (--i; i >= 0; i--) {
+ irq_set_chained_handler_and_data(msi->irqs[i].hwirq,
+ NULL, NULL);
+ irq_dispose_mapping(msi->irqs[i].hwirq);
+ }
+
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_msi_init);
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
new file mode 100644
index 0000000..b053d91
--- /dev/null
+++ b/drivers/pci/controller/pci-msm.c
@@ -0,0 +1,6825 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.*/
+
+#include <asm/dma-iommu.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk/qcom.h>
+#include <linux/compiler.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/ipc_logging.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_pcie.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "../pci.h"
+
+#define PCIE_VENDOR_ID_QCOM (0x17cb)
+
+#define PCIE20_PARF_DBI_BASE_ADDR (0x350)
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE (0x358)
+
+#define PCIE_GEN3_SPCIE_CAP (0x0154)
+#define PCIE_GEN3_GEN2_CTRL (0x080c)
+#define PCIE_GEN3_RELATED (0x0890)
+#define PCIE_GEN3_EQ_CONTROL (0x08a8)
+#define PCIE_GEN3_EQ_FB_MODE_DIR_CHANGE (0x08ac)
+#define PCIE_GEN3_MISC_CONTROL (0x08bc)
+
+#define PCIE20_PARF_SYS_CTRL (0x00)
+#define PCIE20_PARF_PM_CTRL (0x20)
+#define PCIE20_PARF_PM_STTS (0x24)
+#define PCIE20_PARF_PHY_CTRL (0x40)
+#define PCIE20_PARF_TEST_BUS (0xe4)
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL (0x174)
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT (0x1a8)
+#define PCIE20_PARF_LTSSM (0x1b0)
+#define PCIE20_PARF_INT_ALL_STATUS (0x224)
+#define PCIE20_PARF_INT_ALL_CLEAR (0x228)
+#define PCIE20_PARF_INT_ALL_MASK (0x22c)
+#define PCIE20_PARF_DEVICE_TYPE (0x1000)
+#define PCIE20_PARF_BDF_TO_SID_TABLE_N (0x2000)
+
+#define PCIE20_ELBI_SYS_CTRL (0x04)
+#define PCIE20_ELBI_SYS_STTS (0x08)
+
+#define PCIE20_CAP (0x70)
+#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
+#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
+
+#define PCIE20_COMMAND_STATUS (0x04)
+#define PCIE20_HEADER_TYPE (0x0c)
+#define PCIE20_BRIDGE_CTRL (0x3c)
+#define PCIE20_DEVICE_CONTROL_STATUS (0x78)
+#define PCIE20_DEVICE_CONTROL2_STATUS2 (0x98)
+
+#define PCIE20_AUX_CLK_FREQ_REG (0xb40)
+#define PCIE20_ACK_F_ASPM_CTRL_REG (0x70c)
+#define PCIE20_ACK_N_FTS (0xff00)
+
+#define PCIE20_PLR_IATU_VIEWPORT (0x900)
+#define PCIE20_PLR_IATU_CTRL1 (0x904)
+#define PCIE20_PLR_IATU_CTRL2 (0x908)
+#define PCIE20_PLR_IATU_LBAR (0x90c)
+#define PCIE20_PLR_IATU_UBAR (0x910)
+#define PCIE20_PLR_IATU_LAR (0x914)
+#define PCIE20_PLR_IATU_LTAR (0x918)
+#define PCIE20_PLR_IATU_UTAR (0x91c)
+
+#define PCIE_IATU_BASE(n) (n * 0x200)
+#define PCIE_IATU_CTRL1(n) (PCIE_IATU_BASE(n) + 0x00)
+#define PCIE_IATU_CTRL2(n) (PCIE_IATU_BASE(n) + 0x04)
+#define PCIE_IATU_LBAR(n) (PCIE_IATU_BASE(n) + 0x08)
+#define PCIE_IATU_UBAR(n) (PCIE_IATU_BASE(n) + 0x0c)
+#define PCIE_IATU_LAR(n) (PCIE_IATU_BASE(n) + 0x10)
+#define PCIE_IATU_LTAR(n) (PCIE_IATU_BASE(n) + 0x14)
+#define PCIE_IATU_UTAR(n) (PCIE_IATU_BASE(n) + 0x18)
+
+#define PCIE20_PORT_LINK_CTRL_REG (0x710)
+#define PCIE20_PIPE_LOOPBACK_CONTROL (0x8b8)
+#define LOOPBACK_BASE_ADDR_OFFSET (0x8000)
+
+#define PCIE20_CTRL1_TYPE_CFG0 (0x04)
+#define PCIE20_CTRL1_TYPE_CFG1 (0x05)
+
+#define PCIE20_CAP_ID (0x10)
+#define L1SUB_CAP_ID (0x1e)
+
+#define PCIE_CAP_PTR_OFFSET (0x34)
+#define PCIE_EXT_CAP_OFFSET (0x100)
+
+#define PCIE20_AER_UNCORR_ERR_STATUS_REG (0x104)
+#define PCIE20_AER_CORR_ERR_STATUS_REG (0x110)
+#define PCIE20_AER_ROOT_ERR_STATUS_REG (0x130)
+#define PCIE20_AER_ERR_SRC_ID_REG (0x134)
+
+#define RD (0)
+#define WR (1)
+#define MSM_PCIE_ERROR (-1)
+
+#define PERST_PROPAGATION_DELAY_US_MIN (1000)
+#define PERST_PROPAGATION_DELAY_US_MAX (1005)
+#define SWITCH_DELAY_MAX (20)
+#define REFCLK_STABILIZATION_DELAY_US_MIN (1000)
+#define REFCLK_STABILIZATION_DELAY_US_MAX (1005)
+#define LINK_UP_TIMEOUT_US_MIN (5000)
+#define LINK_UP_TIMEOUT_US_MAX (5100)
+#define LINK_UP_CHECK_MAX_COUNT (20)
+#define EP_UP_TIMEOUT_US_MIN (1000)
+#define EP_UP_TIMEOUT_US_MAX (1005)
+#define EP_UP_TIMEOUT_US (1000000)
+#define PHY_STABILIZATION_DELAY_US_MIN (995)
+#define PHY_STABILIZATION_DELAY_US_MAX (1005)
+
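+/* 0x07: CRC-8 polynomial x^8 + x^2 + x + 1, used for the BDF-to-SID hash */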
+#define MSM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
+#define GEN1_SPEED (0x1)
+#define GEN2_SPEED (0x2)
+#define GEN3_SPEED (0x3)
+
+#define MSM_PCIE_IOMMU_PRESENT BIT(0)
+#define MSM_PCIE_IOMMU_S1_BYPASS BIT(1)
+#define MSM_PCIE_IOMMU_FAST BIT(2)
+#define MSM_PCIE_IOMMU_ATOMIC BIT(3)
+#define MSM_PCIE_IOMMU_FORCE_COHERENT BIT(4)
+
+#define PHY_READY_TIMEOUT_COUNT (10)
+#define XMLH_LINK_UP (0x400)
+#define MAX_PROP_SIZE (32)
+#define MAX_RC_NAME_LEN (15)
+#define MSM_PCIE_MAX_VREG (4)
+#define MSM_PCIE_MAX_CLK (13)
+#define MSM_PCIE_MAX_PIPE_CLK (1)
+#define MAX_RC_NUM (3)
+#define MAX_DEVICE_NUM (20)
+#define PCIE_TLP_RD_SIZE (0x5)
+#define PCIE_MSI_NR_IRQS (256)
+#define PCIE_LOG_PAGES (50)
+#define PCIE_CONF_SPACE_DW (1024)
+#define PCIE_CLEAR (0xdeadbeef)
+#define PCIE_LINK_DOWN (0xffffffff)
+
+#define MSM_PCIE_MAX_RESET (5)
+#define MSM_PCIE_MAX_PIPE_RESET (1)
+
+#define MSM_PCIE_MSI_PHY (0xa0000000)
+#define PCIE20_MSI_CTRL_ADDR (0x820)
+#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
+#define PCIE20_MSI_CTRL_INTR_EN (0x828)
+#define PCIE20_MSI_CTRL_INTR_MASK (0x82c)
+#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
+#define PCIE20_MSI_CTRL_MAX (8)
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
+#else
+#define PCIE_UPPER_ADDR(addr) (0x0)
+#endif
+#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
+
+#define PCIE_BUS_PRIV_DATA(bus) \
+ (struct msm_pcie_dev_t *)(bus->sysdata)
+
+/* Config Space Offsets */
+#define BDF_OFFSET(bus, devfn) \
+ ((bus << 24) | (devfn << 16))
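+/* e.g. bus 0x01, devfn 0x08 encodes to 0x01080000 */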
+
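+/*
+ * Logging helpers: PCIE_DBG/DBG2/DBG3 write to the per-RC IPC log buffers
+ * and, when msm_pcie_debug_mask is set, also to the kernel log. PCIE_DUMP
+ * and PCIE_DBG_FS use the dump IPC log buffer (PCIE_DBG_FS always hits the
+ * kernel log too); PCIE_INFO and PCIE_ERR always reach the kernel log.
+ */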
+#define PCIE_GEN_DBG(x...) do { \
+ if (msm_pcie_debug_mask) \
+ pr_alert(x); \
+ } while (0)
+
+#define PCIE_DBG(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log_long) \
+ ipc_log_string((dev)->ipc_log_long, \
+ "DBG1:%s: " fmt, __func__, arg); \
+ if ((dev) && (dev)->ipc_log) \
+ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+ if (msm_pcie_debug_mask) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_DBG2(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log) \
+ ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
+ if (msm_pcie_debug_mask) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_DBG3(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log) \
+ ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
+ if (msm_pcie_debug_mask) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_DUMP(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log_dump) \
+ ipc_log_string((dev)->ipc_log_dump, \
+ "DUMP:%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_DBG_FS(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log_dump) \
+ ipc_log_string((dev)->ipc_log_dump, \
+ "DBG_FS:%s: " fmt, __func__, arg); \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_INFO(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log_long) \
+ ipc_log_string((dev)->ipc_log_long, \
+ "INFO:%s: " fmt, __func__, arg); \
+ if ((dev) && (dev)->ipc_log) \
+ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+ pr_info("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define PCIE_ERR(dev, fmt, arg...) do { \
+ if ((dev) && (dev)->ipc_log_long) \
+ ipc_log_string((dev)->ipc_log_long, \
+ "ERR:%s: " fmt, __func__, arg); \
+ if ((dev) && (dev)->ipc_log) \
+ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+ pr_err("%s: " fmt, __func__, arg); \
+ } while (0)
+
+
+enum msm_pcie_res {
+ MSM_PCIE_RES_PARF,
+ MSM_PCIE_RES_PHY,
+ MSM_PCIE_RES_DM_CORE,
+ MSM_PCIE_RES_ELBI,
+ MSM_PCIE_RES_IATU,
+ MSM_PCIE_RES_CONF,
+ MSM_PCIE_RES_IO,
+ MSM_PCIE_RES_BARS,
+ MSM_PCIE_RES_TCSR,
+ MSM_PCIE_RES_RUMI,
+ MSM_PCIE_MAX_RES,
+};
+
+enum msm_pcie_irq {
+ MSM_PCIE_INT_MSI,
+ MSM_PCIE_INT_A,
+ MSM_PCIE_INT_B,
+ MSM_PCIE_INT_C,
+ MSM_PCIE_INT_D,
+ MSM_PCIE_INT_GLOBAL_INT,
+ MSM_PCIE_MAX_IRQ,
+};
+
+enum msm_pcie_irq_event {
+ MSM_PCIE_INT_EVT_LINK_DOWN = 1,
+ MSM_PCIE_INT_EVT_BME,
+ MSM_PCIE_INT_EVT_PM_TURNOFF,
+ MSM_PCIE_INT_EVT_DEBUG,
+ MSM_PCIE_INT_EVT_LTR,
+ MSM_PCIE_INT_EVT_MHI_Q6,
+ MSM_PCIE_INT_EVT_MHI_A7,
+ MSM_PCIE_INT_EVT_DSTATE_CHANGE,
+ MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
+ MSM_PCIE_INT_EVT_MMIO_WRITE,
+ MSM_PCIE_INT_EVT_CFG_WRITE,
+ MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
+ MSM_PCIE_INT_EVT_LINK_UP,
+ MSM_PCIE_INT_EVT_AER_LEGACY,
+ MSM_PCIE_INT_EVT_AER_ERR,
+ MSM_PCIE_INT_EVT_PME_LEGACY,
+ MSM_PCIE_INT_EVT_PLS_PME,
+ MSM_PCIE_INT_EVT_INTD,
+ MSM_PCIE_INT_EVT_INTC,
+ MSM_PCIE_INT_EVT_INTB,
+ MSM_PCIE_INT_EVT_INTA,
+ MSM_PCIE_INT_EVT_EDMA,
+ MSM_PCIE_INT_EVT_MSI_0,
+ MSM_PCIE_INT_EVT_MSI_1,
+ MSM_PCIE_INT_EVT_MSI_2,
+ MSM_PCIE_INT_EVT_MSI_3,
+ MSM_PCIE_INT_EVT_MSI_4,
+ MSM_PCIE_INT_EVT_MSI_5,
+ MSM_PCIE_INT_EVT_MSI_6,
+ MSM_PCIE_INT_EVT_MSI_7,
+ MSM_PCIE_INT_EVT_MAX = 30,
+};
+
+enum msm_pcie_gpio {
+ MSM_PCIE_GPIO_PERST,
+ MSM_PCIE_GPIO_WAKE,
+ MSM_PCIE_GPIO_EP,
+ MSM_PCIE_MAX_GPIO
+};
+
+enum msm_pcie_link_status {
+ MSM_PCIE_LINK_DEINIT,
+ MSM_PCIE_LINK_ENABLED,
+ MSM_PCIE_LINK_DISABLED
+};
+
+enum msm_pcie_boot_option {
+ MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
+ MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
+};
+
+enum msm_pcie_debugfs_option {
+ MSM_PCIE_OUTPUT_PCIE_INFO,
+ MSM_PCIE_DISABLE_LINK,
+ MSM_PCIE_ENABLE_LINK,
+ MSM_PCIE_DISABLE_ENABLE_LINK,
+ MSM_PCIE_DUMP_SHADOW_REGISTER,
+ MSM_PCIE_DISABLE_L0S,
+ MSM_PCIE_ENABLE_L0S,
+ MSM_PCIE_DISABLE_L1,
+ MSM_PCIE_ENABLE_L1,
+ MSM_PCIE_DISABLE_L1SS,
+ MSM_PCIE_ENABLE_L1SS,
+ MSM_PCIE_ENUMERATION,
+ MSM_PCIE_READ_PCIE_REGISTER,
+ MSM_PCIE_WRITE_PCIE_REGISTER,
+ MSM_PCIE_DUMP_PCIE_REGISTER_SPACE,
+ MSM_PCIE_ALLOCATE_DDR_MAP_LBAR,
+ MSM_PCIE_FREE_DDR_UNMAP_LBAR,
+ MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS,
+ MSM_PCIE_CONFIGURE_LOOPBACK,
+ MSM_PCIE_SETUP_LOOPBACK_IATU,
+ MSM_PCIE_READ_DDR,
+ MSM_PCIE_READ_LBAR,
+ MSM_PCIE_WRITE_DDR,
+ MSM_PCIE_WRITE_LBAR,
+ MSM_PCIE_DISABLE_AER,
+ MSM_PCIE_ENABLE_AER,
+ MSM_PCIE_GPIO_STATUS,
+ MSM_PCIE_ASSERT_PERST,
+ MSM_PCIE_DEASSERT_PERST,
+ MSM_PCIE_KEEP_RESOURCES_ON,
+ MSM_PCIE_FORCE_GEN1,
+ MSM_PCIE_FORCE_GEN2,
+ MSM_PCIE_FORCE_GEN3,
+ MSM_PCIE_MAX_DEBUGFS_OPTION
+};
+
+static const char * const
+ msm_pcie_debugfs_option_desc[MSM_PCIE_MAX_DEBUGFS_OPTION] = {
+ "OUTPUT PCIE INFO",
+ "DISABLE LINK",
+ "ENABLE LINK",
+ "DISABLE AND ENABLE LINK",
+ "DUMP PCIE SHADOW REGISTER",
+ "DISABLE L0S",
+ "ENABLE L0S",
+ "DISABLE L1",
+ "ENABLE L1",
+ "DISABLE L1SS",
+ "ENABLE L1SS",
+ "ENUMERATE",
+ "READ A PCIE REGISTER",
+ "WRITE TO PCIE REGISTER",
+ "DUMP PCIE REGISTER SPACE",
+ "ALLOCATE DDR AND MAP LBAR",
+ "FREE DDR AND UNMAP LBAR",
+ "OUTPUT DDR AND LBAR VIR ADDRESS",
+ "CONFIGURE PCIE LOOPBACK",
+ "SETUP LOOPBACK IATU",
+ "READ DDR",
+ "READ LBAR",
+ "WRITE DDR",
+ "WRITE LBAR",
+ "SET AER ENABLE FLAG",
+ "CLEAR AER ENABLE FLAG",
+ "OUTPUT PERST AND WAKE GPIO STATUS",
+ "ASSERT PERST",
+ "DE-ASSERT PERST",
+ "SET KEEP_RESOURCES_ON FLAG",
+ "SET MAXIMUM LINK SPEED TO GEN 1",
+ "SET MAXIMUM LINK SPEED TO GEN 2",
+ "SET MAXIMUM LINK SPEED TO GEN 3",
+};
+
+/* gpio info structure */
+struct msm_pcie_gpio_info_t {
+ char *name;
+ uint32_t num;
+ bool out;
+ uint32_t on;
+ uint32_t init;
+ bool required;
+};
+
+/* voltage regulator info structure */
+struct msm_pcie_vreg_info_t {
+ struct regulator *hdl;
+ char *name;
+ uint32_t max_v;
+ uint32_t min_v;
+ uint32_t opt_mode;
+ bool required;
+};
+
+/* reset info structure */
+struct msm_pcie_reset_info_t {
+ struct reset_control *hdl;
+ char *name;
+ bool required;
+};
+
+/* clock info structure */
+struct msm_pcie_clk_info_t {
+ struct clk *hdl;
+ char *name;
+ u32 freq;
+ bool config_mem;
+ bool required;
+};
+
+/* resource info structure */
+struct msm_pcie_res_info_t {
+ char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+/* irq info structure */
+struct msm_pcie_irq_info_t {
+ char *name;
+ uint32_t num;
+};
+
+/* phy info structure */
+struct msm_pcie_phy_info_t {
+ u32 offset;
+ u32 val;
+ u32 delay;
+};
+
+/* sid info structure */
+struct msm_pcie_sid_info_t {
+ u16 bdf;
+ u8 pcie_sid;
+ u8 hash;
+ u8 next_hash;
+ u32 smmu_sid;
+ u32 value;
+};
+
+/* PCIe device info structure */
+struct msm_pcie_device_info {
+ u32 bdf;
+ struct pci_dev *dev;
+ short short_bdf;
+ u32 sid;
+ int domain;
+ void __iomem *conf_base;
+ unsigned long phy_address;
+ u32 dev_ctrlstts_offset;
+ struct msm_pcie_register_event *event_reg;
+ bool registered;
+};
+
+/* msm pcie device structure */
+struct msm_pcie_dev_t {
+ struct platform_device *pdev;
+ struct pci_dev *dev;
+ struct regulator *gdsc;
+ struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
+ struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
+ struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
+ struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
+ struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
+ struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
+ struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
+ struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
+
+ void __iomem *parf;
+ void __iomem *phy;
+ void __iomem *elbi;
+ void __iomem *iatu;
+ void __iomem *dm_core;
+ void __iomem *conf;
+ void __iomem *bars;
+ void __iomem *tcsr;
+ void __iomem *rumi;
+
+ uint32_t axi_bar_start;
+ uint32_t axi_bar_end;
+
+ struct resource *dev_mem_res;
+ struct resource *dev_io_res;
+
+ uint32_t wake_n;
+ uint32_t vreg_n;
+ uint32_t gpio_n;
+ uint32_t parf_deemph;
+ uint32_t parf_swing;
+
+ bool cfg_access;
+ spinlock_t cfg_lock;
+ unsigned long irqsave_flags;
+ struct mutex enumerate_lock;
+ struct mutex setup_lock;
+
+ struct irq_domain *irq_domain;
+ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
+ bool use_msi;
+
+ enum msm_pcie_link_status link_status;
+ bool user_suspend;
+ bool disable_pc;
+ struct pci_saved_state *saved_state;
+
+ struct wakeup_source ws;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ uint32_t bus_client;
+
+ bool l0s_supported;
+ bool l1_supported;
+ bool l1ss_supported;
+ bool l1_1_pcipm_supported;
+ bool l1_2_pcipm_supported;
+ bool l1_1_aspm_supported;
+ bool l1_2_aspm_supported;
+ bool common_clk_en;
+ bool clk_power_manage_en;
+ bool aux_clk_sync;
+ bool aer_enable;
+ uint32_t smmu_sid_base;
+ uint32_t link_check_max_count;
+ uint32_t target_link_speed;
+ uint32_t n_fts;
+ uint32_t ep_latency;
+ uint32_t switch_latency;
+ uint32_t wr_halt_size;
+ uint32_t slv_addr_space_size;
+ uint32_t phy_status_offset;
+ uint32_t phy_power_down_offset;
+ uint32_t cpl_timeout;
+ uint32_t current_bdf;
+ uint32_t perst_delay_us_min;
+ uint32_t perst_delay_us_max;
+ uint32_t tlp_rd_size;
+ bool linkdown_panic;
+ uint32_t boot_option;
+
+ uint32_t rc_idx;
+ uint32_t phy_ver;
+ bool drv_ready;
+ bool enumerated;
+ struct work_struct handle_wake_work;
+ struct mutex recovery_lock;
+ spinlock_t wakeup_lock;
+ spinlock_t irq_lock;
+ ulong linkdown_counter;
+ ulong link_turned_on_counter;
+ ulong link_turned_off_counter;
+ ulong rc_corr_counter;
+ ulong rc_non_fatal_counter;
+ ulong rc_fatal_counter;
+ ulong ep_corr_counter;
+ ulong ep_non_fatal_counter;
+ ulong ep_fatal_counter;
+ bool suspending;
+ ulong wake_counter;
+ u32 num_active_ep;
+ u32 num_ep;
+ bool pending_ep_reg;
+ u32 phy_len;
+ struct msm_pcie_phy_info_t *phy_sequence;
+ u32 sid_info_len;
+ struct msm_pcie_sid_info_t *sid_info;
+ u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
+ u32 rc_shadow[PCIE_CONF_SPACE_DW];
+ bool shadow_en;
+ bool bridge_found;
+ struct msm_pcie_register_event *event_reg;
+ bool power_on;
+ void *ipc_log;
+ void *ipc_log_long;
+ void *ipc_log_dump;
+ bool use_19p2mhz_aux_clk;
+ bool use_pinctrl;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_sleep;
+ struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
+
+ void (*rumi_init)(struct msm_pcie_dev_t *pcie_dev);
+};
+
+struct msm_root_dev_t {
+ struct msm_pcie_dev_t *pcie_dev;
+ struct pci_dev *pci_dev;
+ uint32_t iommu_cfg;
+ dma_addr_t iommu_base;
+ size_t iommu_size;
+};
+
+/* debug mask sys interface */
+static int msm_pcie_debug_mask;
+module_param_named(debug_mask, msm_pcie_debug_mask,
+ int, 0644);
+
+/*
+ * For each bit set, invert the default capability
+ * option for the corresponding root complex
+ * and its devices.
+ */
+static int msm_pcie_invert_l0s_support;
+module_param_named(invert_l0s_support, msm_pcie_invert_l0s_support,
+ int, 0644);
+static int msm_pcie_invert_l1_support;
+module_param_named(invert_l1_support, msm_pcie_invert_l1_support,
+ int, 0644);
+static int msm_pcie_invert_l1ss_support;
+module_param_named(invert_l1ss_support, msm_pcie_invert_l1ss_support,
+ int, 0644);
+static int msm_pcie_invert_aer_support;
+module_param_named(invert_aer_support, msm_pcie_invert_aer_support,
+ int, 0644);
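+/* e.g. invert_l1_support=0x2 flips the default L1 setting for RC1 */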
+
+/*
+ * For each bit set, keep the resources on when link training fails
+ * or linkdown occurs for the corresponding root complex
+ */
+static int msm_pcie_keep_resources_on;
+module_param_named(keep_resources_on, msm_pcie_keep_resources_on,
+ int, 0644);
+
+/*
+ * For each bit set, force the corresponding root complex
+ * to do link training at gen1 speed.
+ */
+static int msm_pcie_force_gen1;
+module_param_named(force_gen1, msm_pcie_force_gen1,
+ int, 0644);
+
+
+/*
+ * Each bit set in BIT[3:0] selects the corresponding root complex, which
+ * then uses the value in BIT[31:4] to override the default
+ * (LINK_UP_CHECK_MAX_COUNT) max check count for link training.
+ * Each iteration is LINK_UP_TIMEOUT_US_MIN long.
+ */
+static int msm_pcie_link_check_max_count;
+module_param_named(link_check_max_count, msm_pcie_link_check_max_count,
+ int, 0644);
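+/* e.g. 0x141 selects RC0 (BIT(0)) and overrides the max check count to 0x14 (20) */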
+
+/* debugfs values */
+static u32 rc_sel = BIT(0);
+static u32 base_sel;
+static u32 wr_offset;
+static u32 wr_mask;
+static u32 wr_value;
+static u32 corr_counter_limit = 5;
+
+/* CRC8 table for BDF to SID translation */
+static u8 msm_pcie_crc8_table[CRC8_TABLE_SIZE];
+
+/* Table to track info of PCIe devices */
+static struct msm_pcie_device_info
+ msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
+
+/* PCIe driver state */
+static struct pcie_drv_sta {
+ u32 rc_num;
+ struct mutex drv_lock;
+} pcie_drv;
+
+/* msm pcie device data */
+static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
+
+/* regulators */
+static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
+ {NULL, "vreg-3p3", 0, 0, 0, false},
+ {NULL, "vreg-1p8", 1800000, 1800000, 14000, true},
+ {NULL, "vreg-0p9", 1000000, 1000000, 40000, true},
+ {NULL, "vreg-cx", 0, 0, 0, false}
+};
+
+/* GPIOs */
+static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
+ {"perst-gpio", 0, 1, 0, 0, 1},
+ {"wake-gpio", 0, 0, 0, 0, 0},
+ {"qcom,ep-gpio", 0, 1, 1, 0, 0}
+};
+
+/* resets */
+static struct msm_pcie_reset_info_t
+msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
+ {
+ {NULL, "pcie_0_core_reset", false},
+ {NULL, "pcie_phy_reset", false},
+ {NULL, "pcie_phy_com_reset", false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", false},
+ {NULL, "pcie_0_phy_reset", false}
+ },
+ {
+ {NULL, "pcie_1_core_reset", false},
+ {NULL, "pcie_phy_reset", false},
+ {NULL, "pcie_phy_com_reset", false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", false},
+ {NULL, "pcie_1_phy_reset", false}
+ },
+ {
+ {NULL, "pcie_2_core_reset", false},
+ {NULL, "pcie_phy_reset", false},
+ {NULL, "pcie_phy_com_reset", false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", false},
+ {NULL, "pcie_2_phy_reset", false}
+ }
+};
+
+/* pipe reset */
+static struct msm_pcie_reset_info_t
+msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
+ {
+ {NULL, "pcie_0_phy_pipe_reset", false}
+ },
+ {
+ {NULL, "pcie_1_phy_pipe_reset", false}
+ },
+ {
+ {NULL, "pcie_2_phy_pipe_reset", false}
+ }
+};
+
+/* clocks */
+static struct msm_pcie_clk_info_t
+ msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
+ {
+ {NULL, "pcie_0_ref_clk_src", 0, false, false},
+ {NULL, "pcie_0_aux_clk", 1010000, false, true},
+ {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_0_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_0_ldo", 0, false, true},
+ {NULL, "pcie_0_smmu_clk", 0, false, false},
+ {NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_0_sleep_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false}
+ },
+ {
+ {NULL, "pcie_1_ref_clk_src", 0, false, false},
+ {NULL, "pcie_1_aux_clk", 1010000, false, true},
+ {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_1_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_1_ldo", 0, false, true},
+ {NULL, "pcie_1_smmu_clk", 0, false, false},
+ {NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_1_sleep_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false}
+ },
+ {
+ {NULL, "pcie_2_ref_clk_src", 0, false, false},
+ {NULL, "pcie_2_aux_clk", 1010000, false, true},
+ {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_2_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_2_ldo", 0, false, true},
+ {NULL, "pcie_2_smmu_clk", 0, false, false},
+ {NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_2_sleep_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false}
+ }
+};
+
+/* Pipe Clocks */
+static struct msm_pcie_clk_info_t
+ msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
+ {
+ {NULL, "pcie_0_pipe_clk", 125000000, true, true},
+ },
+ {
+ {NULL, "pcie_1_pipe_clk", 125000000, true, true},
+ },
+ {
+ {NULL, "pcie_2_pipe_clk", 125000000, true, true},
+ }
+};
+
+/* resources */
+static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
+ {"parf", NULL, NULL},
+ {"phy", NULL, NULL},
+ {"dm_core", NULL, NULL},
+ {"elbi", NULL, NULL},
+ {"iatu", NULL, NULL},
+ {"conf", NULL, NULL},
+ {"io", NULL, NULL},
+ {"bars", NULL, NULL},
+ {"tcsr", NULL, NULL},
+ {"rumi", NULL, NULL}
+};
+
+/* irqs */
+static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
+ {"int_msi", 0},
+ {"int_a", 0},
+ {"int_b", 0},
+ {"int_c", 0},
+ {"int_d", 0},
+ {"int_global_int", 0}
+};
+
+static void msm_pcie_config_sid(struct msm_pcie_dev_t *dev);
+static void msm_pcie_config_l0s_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus);
+static void msm_pcie_config_l1_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus);
+static void msm_pcie_config_l1ss_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus);
+static void msm_pcie_config_l0s_enable_all(struct msm_pcie_dev_t *dev);
+static void msm_pcie_config_l1_enable_all(struct msm_pcie_dev_t *dev);
+static void msm_pcie_config_l1ss_enable_all(struct msm_pcie_dev_t *dev);
+
+static void msm_pcie_check_l1ss_support_all(struct msm_pcie_dev_t *dev);
+
+static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev, bool enable);
+
+#ifdef CONFIG_ARM
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+}
+#else
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
+static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
+{
+ writel_relaxed(value, base + offset);
+	/* ensure that changes are propagated to the hardware */
+ wmb();
+}
+
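+/*
+ * Read-modify-write of a register field: e.g. mask 0xFF0000 with val 0x1A
+ * clears bits [23:16] and then writes 0x1A into them.
+ */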
+static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 shift = find_first_bit((void *)&mask, 32);
+ u32 tmp = readl_relaxed(base + offset);
+
+ tmp &= ~mask; /* clear written bits */
+ val = tmp | (val << shift);
+ writel_relaxed(val, base + offset);
+	/* ensure that changes are propagated to the hardware */
+ wmb();
+}
+
+static inline void msm_pcie_config_clear_set_dword(struct pci_dev *pdev,
+ int pos, u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(pdev, pos, val);
+}
+
+static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
+ struct msm_pcie_clk_info_t *info)
+{
+ int ret;
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured core memory for clk %s.\n",
+ dev->rc_idx, info->name);
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured peripheral memory for clk %s.\n",
+ dev->rc_idx, info->name);
+}
+
+static void msm_pcie_rumi_init(struct msm_pcie_dev_t *pcie_dev)
+{
+ u32 val;
+ u32 reset_offs = 0x04;
+ u32 phy_ctrl_offs = 0x40;
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: enter.\n", pcie_dev->rc_idx);
+
+ val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) | 0x1000;
+ writel_relaxed(val, pcie_dev->rumi + phy_ctrl_offs);
+ usleep_range(10000, 10001);
+
+ writel_relaxed(0x800, pcie_dev->rumi + reset_offs);
+ usleep_range(50000, 50001);
+ writel_relaxed(0xFFFFFFFF, pcie_dev->rumi + reset_offs);
+ usleep_range(50000, 50001);
+ writel_relaxed(0x800, pcie_dev->rumi + reset_offs);
+ usleep_range(50000, 50001);
+ writel_relaxed(0, pcie_dev->rumi + reset_offs);
+ usleep_range(50000, 50001);
+
+ val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) & 0xFFFFEFFF;
+ writel_relaxed(val, pcie_dev->rumi + phy_ctrl_offs);
+ usleep_range(10000, 10001);
+
+ val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) & 0xFFFFFFFE;
+ writel_relaxed(val, pcie_dev->rumi + phy_ctrl_offs);
+}
+
+static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
+{
+ int i, size;
+
+ size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
+ for (i = 0; i < size; i += 32) {
+ PCIE_DUMP(dev,
+ "PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ dev->rc_idx, i,
+ readl_relaxed(dev->phy + i),
+ readl_relaxed(dev->phy + (i + 4)),
+ readl_relaxed(dev->phy + (i + 8)),
+ readl_relaxed(dev->phy + (i + 12)),
+ readl_relaxed(dev->phy + (i + 16)),
+ readl_relaxed(dev->phy + (i + 20)),
+ readl_relaxed(dev->phy + (i + 24)),
+ readl_relaxed(dev->phy + (i + 28)));
+ }
+}
+
+static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
+ u32 offset)
+{
+ if (offset % 4) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
+ dev->rc_idx, offset);
+ return MSM_PCIE_ERROR;
+ }
+
+ return 0;
+}
+
+static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
+ bool check_sw_stts,
+ bool check_ep,
+ void __iomem *ep_conf)
+{
+ u32 val;
+
+ if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
+ PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
+ dev->rc_idx);
+ return false;
+ }
+
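+	/* bit 29 of the Link Control/Status dword is Data Link Layer Link Active */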
+ if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
+ PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
+ dev->rc_idx);
+ return false;
+ }
+
+ val = readl_relaxed(dev->dm_core);
+	PCIE_DBG(dev, "PCIe: device ID and vendor ID of RC %d are 0x%x.\n",
+ dev->rc_idx, val);
+ if (val == PCIE_LINK_DOWN) {
+ PCIE_ERR(dev,
+			"PCIe: The link of RC %d is not really up; device ID and vendor ID of RC %d are 0x%x.\n",
+ dev->rc_idx, dev->rc_idx, val);
+ return false;
+ }
+
+ if (check_ep) {
+ val = readl_relaxed(ep_conf);
+ PCIE_DBG(dev,
+			"PCIe: device ID and vendor ID of EP of RC %d are 0x%x.\n",
+ dev->rc_idx, val);
+ if (val == PCIE_LINK_DOWN) {
+ PCIE_ERR(dev,
+				"PCIe: The link of RC %d is not really up; device ID and vendor ID of EP of RC %d are 0x%x.\n",
+ dev->rc_idx, dev->rc_idx, val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
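+/*
+ * Restore config space from the driver's shadow copy: the RC's DBI space or,
+ * for each endpoint whose link is confirmed up, every saved dword (entries
+ * still holding PCIE_CLEAR are skipped).
+ */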
+static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
+{
+ int i, j;
+ u32 val = 0;
+ u32 *shadow;
+ void __iomem *cfg = dev->conf;
+
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (!rc && !dev->pcidev_table[i].bdf)
+ break;
+ if (rc) {
+ cfg = dev->dm_core;
+ shadow = dev->rc_shadow;
+ } else {
+ if (!msm_pcie_confirm_linkup(dev, false, true,
+ dev->pcidev_table[i].conf_base))
+ continue;
+
+ shadow = dev->ep_shadow[i];
+ PCIE_DBG(dev,
+ "PCIe Device: %02x:%02x.%01x\n",
+ dev->pcidev_table[i].bdf >> 24,
+ dev->pcidev_table[i].bdf >> 19 & 0x1f,
+ dev->pcidev_table[i].bdf >> 16 & 0x07);
+ }
+ for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
+ val = shadow[j];
+ if (val != PCIE_CLEAR) {
+ PCIE_DBG3(dev,
+ "PCIe: before recovery:cfg 0x%x:0x%x\n",
+ j * 4, readl_relaxed(cfg + j * 4));
+ PCIE_DBG3(dev,
+ "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+ j, j * 4, val);
+ writel_relaxed(val, cfg + j * 4);
+				/* ensure changes are propagated to the hardware */
+ wmb();
+ PCIE_DBG3(dev,
+ "PCIe: after recovery:cfg 0x%x:0x%x\n\n",
+ j * 4, readl_relaxed(cfg + j * 4));
+ }
+ }
+ if (rc)
+ break;
+
+ pci_save_state(dev->pcidev_table[i].dev);
+ cfg += SZ_4K;
+ }
+}
+
+static void msm_pcie_write_mask(void __iomem *addr,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t val;
+
+ val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+ writel_relaxed(val, addr);
+ wmb(); /* ensure data is written to hardware register */
+}
+
+static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
+{
+ int i, size;
+ u32 original;
+
+ PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
+
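+	/*
+	 * Cycle the test-bus select field (PARF_SYS_CTRL bits [23:16]) through
+	 * 0x01-0x1A and log the resulting PARF_TEST_BUS value for each setting.
+	 */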
+ original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
+ for (i = 1; i <= 0x1A; i++) {
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
+ 0xFF0000, i << 16);
+ PCIE_DUMP(dev,
+ "RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
+ dev->rc_idx,
+ readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
+ readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
+ }
+ writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+ PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
+
+ size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
+ for (i = 0; i < size; i += 32) {
+ PCIE_DUMP(dev,
+ "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ dev->rc_idx, i,
+ readl_relaxed(dev->parf + i),
+ readl_relaxed(dev->parf + (i + 4)),
+ readl_relaxed(dev->parf + (i + 8)),
+ readl_relaxed(dev->parf + (i + 12)),
+ readl_relaxed(dev->parf + (i + 16)),
+ readl_relaxed(dev->parf + (i + 20)),
+ readl_relaxed(dev->parf + (i + 24)),
+ readl_relaxed(dev->parf + (i + 28)));
+ }
+}
+
+static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
+{
+ PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
+ dev->rc_idx, dev->enumerated ? "" : "not");
+ PCIE_DBG_FS(dev, "PCIe: link is %s\n",
+ (dev->link_status == MSM_PCIE_LINK_ENABLED)
+ ? "enabled" : "disabled");
+ PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
+ dev->cfg_access ? "" : "not");
+ PCIE_DBG_FS(dev, "use_msi is %d\n",
+ dev->use_msi);
+ PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
+ dev->use_pinctrl);
+ PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
+ dev->use_19p2mhz_aux_clk);
+ PCIE_DBG_FS(dev, "user_suspend is %d\n",
+ dev->user_suspend);
+ PCIE_DBG_FS(dev, "num_ep: %d\n",
+ dev->num_ep);
+ PCIE_DBG_FS(dev, "num_active_ep: %d\n",
+ dev->num_active_ep);
+ PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
+ dev->pending_ep_reg ? "true" : "false");
+	PCIE_DBG_FS(dev, "phy_len is %d\n",
+		dev->phy_len);
+	PCIE_DBG_FS(dev, "disable_pc is %d\n",
+		dev->disable_pc);
+ PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
+ dev->l0s_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
+ dev->l1_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
+ dev->l1ss_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1_1_pcipm_supported is %s supported\n",
+ dev->l1_1_pcipm_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1_2_pcipm_supported is %s supported\n",
+ dev->l1_2_pcipm_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1_1_aspm_supported is %s supported\n",
+ dev->l1_1_aspm_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "l1_2_aspm_supported is %s supported\n",
+ dev->l1_2_aspm_supported ? "" : "not");
+ PCIE_DBG_FS(dev, "common_clk_en is %d\n",
+ dev->common_clk_en);
+ PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
+ dev->clk_power_manage_en);
+ PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
+ dev->aux_clk_sync);
+	PCIE_DBG_FS(dev, "AER is %s enabled\n",
+ dev->aer_enable ? "" : "not");
+ PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
+ dev->boot_option);
+ PCIE_DBG_FS(dev, "phy_ver is %d\n",
+ dev->phy_ver);
+ PCIE_DBG_FS(dev, "drv_ready is %d\n",
+ dev->drv_ready);
+ PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
+ dev->linkdown_panic);
+ PCIE_DBG_FS(dev, "the link is %s suspending\n",
+ dev->suspending ? "" : "not");
+ PCIE_DBG_FS(dev, "shadow is %s enabled\n",
+ dev->shadow_en ? "" : "not");
+ PCIE_DBG_FS(dev, "the power of RC is %s on\n",
+ dev->power_on ? "" : "not");
+ PCIE_DBG_FS(dev, "bus_client: %d\n",
+ dev->bus_client);
+ PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
+ dev->smmu_sid_base);
+ PCIE_DBG_FS(dev, "n_fts: %d\n",
+ dev->n_fts);
+ PCIE_DBG_FS(dev, "ep_latency: %dms\n",
+ dev->ep_latency);
+ PCIE_DBG_FS(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
+ PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
+ dev->wr_halt_size);
+ PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
+ dev->slv_addr_space_size);
+ PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
+ dev->phy_status_offset);
+ PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
+ dev->phy_power_down_offset);
+ PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
+ dev->cpl_timeout);
+ PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
+ dev->current_bdf);
+ PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
+ dev->perst_delay_us_min);
+ PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
+ dev->perst_delay_us_max);
+ PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
+ dev->tlp_rd_size);
+ PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
+ dev->rc_corr_counter);
+ PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
+ dev->rc_non_fatal_counter);
+ PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
+ dev->rc_fatal_counter);
+ PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
+ dev->ep_corr_counter);
+ PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
+ dev->ep_non_fatal_counter);
+ PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
+ dev->ep_fatal_counter);
+ PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
+ dev->linkdown_counter);
+ PCIE_DBG_FS(dev, "wake_counter: %lu\n",
+ dev->wake_counter);
+ PCIE_DBG_FS(dev, "link_check_max_count: %u\n",
+ dev->link_check_max_count);
+ PCIE_DBG_FS(dev, "target_link_speed: 0x%x\n",
+ dev->target_link_speed);
+ PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
+ dev->link_turned_on_counter);
+ PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
+ dev->link_turned_off_counter);
+}
+
+static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
+{
+ int i, j;
+ u32 val = 0;
+ u32 *shadow;
+
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (!rc && !dev->pcidev_table[i].bdf)
+ break;
+ if (rc) {
+ shadow = dev->rc_shadow;
+ } else {
+ shadow = dev->ep_shadow[i];
+ PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
+ dev->pcidev_table[i].bdf >> 24,
+ dev->pcidev_table[i].bdf >> 19 & 0x1f,
+ dev->pcidev_table[i].bdf >> 16 & 0x07);
+ }
+ for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
+ val = shadow[j];
+ if (val != PCIE_CLEAR) {
+ PCIE_DBG_FS(dev,
+ "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+ j, j * 4, val);
+ }
+ }
+ if (rc)
+ break;
+ }
+}
+
+static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
+ u32 testcase)
+{
+ u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ phys_addr_t loopback_lbar_phy =
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start +
+ LOOPBACK_BASE_ADDR_OFFSET;
+ static uint32_t loopback_val = 0x1;
+ static dma_addr_t loopback_ddr_phy;
+ static uint32_t *loopback_ddr_vir;
+ static void __iomem *loopback_lbar_vir;
+ int ret, i;
+ u32 base_sel_size = 0;
+
+ switch (testcase) {
+ case MSM_PCIE_OUTPUT_PCIE_INFO:
+ PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
+ dev->rc_idx);
+ msm_pcie_show_status(dev);
+ break;
+ case MSM_PCIE_DISABLE_LINK:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+ dev->dev, NULL,
+ MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+ if (ret)
+ PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+ __func__);
+ else
+ PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
+ __func__);
+ break;
+ case MSM_PCIE_ENABLE_LINK:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: enable link and recover config space\n\n",
+ dev->rc_idx);
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+ dev->dev, NULL,
+ MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+ if (ret)
+ PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+ __func__);
+ else {
+ PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+ msm_pcie_recover_config(dev->dev);
+ }
+ break;
+ case MSM_PCIE_DISABLE_ENABLE_LINK:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
+ dev->rc_idx);
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+ dev->dev, NULL,
+ MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+ if (ret)
+ PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+ __func__);
+ else
+ PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+ dev->dev, NULL,
+ MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+ if (ret)
+ PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+ __func__);
+ else {
+ PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+ msm_pcie_recover_config(dev->dev);
+ }
+ break;
+ case MSM_PCIE_DUMP_SHADOW_REGISTER:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: dumping RC shadow registers\n",
+ dev->rc_idx);
+ msm_pcie_shadow_dump(dev, true);
+
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: dumping EP shadow registers\n",
+ dev->rc_idx);
+ msm_pcie_shadow_dump(dev, false);
+ break;
+ case MSM_PCIE_DISABLE_L0S:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
+ dev->rc_idx);
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED)
+ msm_pcie_config_l0s_disable_all(dev, dev->dev->bus);
+ dev->l0s_supported = false;
+ break;
+ case MSM_PCIE_ENABLE_L0S:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
+ dev->rc_idx);
+ dev->l0s_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED)
+ msm_pcie_config_l0s_enable_all(dev);
+ break;
+ case MSM_PCIE_DISABLE_L1:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
+ dev->rc_idx);
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED)
+ msm_pcie_config_l1_disable_all(dev, dev->dev->bus);
+ dev->l1_supported = false;
+ break;
+ case MSM_PCIE_ENABLE_L1:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
+ dev->rc_idx);
+ dev->l1_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
+ msm_pcie_write_mask(dev->parf +
+ PCIE20_PARF_PM_CTRL, BIT(5), 0);
+
+ msm_pcie_config_l1_enable_all(dev);
+ }
+ break;
+ case MSM_PCIE_DISABLE_L1SS:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
+ dev->rc_idx);
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED)
+ msm_pcie_config_l1ss_disable_all(dev, dev->dev->bus);
+ dev->l1ss_supported = false;
+ dev->l1_1_pcipm_supported = false;
+ dev->l1_2_pcipm_supported = false;
+ dev->l1_1_aspm_supported = false;
+ dev->l1_2_aspm_supported = false;
+ break;
+ case MSM_PCIE_ENABLE_L1SS:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
+ dev->rc_idx);
+ dev->l1ss_supported = true;
+ dev->l1_1_pcipm_supported = true;
+ dev->l1_2_pcipm_supported = true;
+ dev->l1_1_aspm_supported = true;
+ dev->l1_2_aspm_supported = true;
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ msm_pcie_check_l1ss_support_all(dev);
+ msm_pcie_config_l1ss_enable_all(dev);
+ }
+ break;
+ case MSM_PCIE_ENUMERATION:
+ PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
+ dev->rc_idx);
+ if (dev->enumerated)
+ PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
+ dev->rc_idx);
+ else {
+ if (!msm_pcie_enumerate(dev->rc_idx))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d is successfully enumerated\n",
+ dev->rc_idx);
+ else
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d enumeration failed\n",
+ dev->rc_idx);
+ }
+ break;
+ case MSM_PCIE_READ_PCIE_REGISTER:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: read a PCIe register\n\n",
+ dev->rc_idx);
+ if (!base_sel) {
+ PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+ break;
+ }
+
+ PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\n",
+ dev->res[base_sel - 1].name,
+ dev->res[base_sel - 1].base,
+ wr_offset);
+
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset)) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ } else {
+ phys_addr_t wr_register =
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+
+ wr_register += wr_offset;
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: register: 0x%pa value: 0x%x\n",
+ dev->rc_idx, &wr_register,
+ readl_relaxed(dev->res[base_sel - 1].base +
+ wr_offset));
+ }
+
+ break;
+ case MSM_PCIE_WRITE_PCIE_REGISTER:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: writing a value to a register\n\n",
+ dev->rc_idx);
+
+ if (!base_sel) {
+ PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+ break;
+ }
+
+ if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+ (!dev->res[base_sel - 1].resource)) {
+ PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+ dev->rc_idx);
+ break;
+ }
+
+ PCIE_DBG_FS(dev,
+ "base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
+ dev->res[base_sel - 1].name,
+ dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
+
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ else
+ msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
+
+ break;
+ case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
+ if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+ (!dev->res[base_sel - 1].resource)) {
+ PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+ dev->rc_idx);
+ break;
+ }
+
+ if (!base_sel) {
+ PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+ break;
+ } else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
+ pcie_parf_dump(dev);
+ break;
+ } else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
+ pcie_phy_dump(dev);
+ break;
+ } else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
+ base_sel_size = 0x1000;
+ } else {
+ base_sel_size = resource_size(
+ dev->res[base_sel - 1].resource);
+ }
+
+ PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
+ dev->res[base_sel - 1].name, dev->rc_idx);
+
+ for (i = 0; i < base_sel_size; i += 32) {
+ PCIE_DBG_FS(dev,
+ "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i, readl_relaxed(dev->res[base_sel - 1].base + i),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
+ readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
+ }
+ break;
+ case MSM_PCIE_ALLOCATE_DDR_MAP_LBAR:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n",
+ dev->rc_idx);
+ loopback_ddr_vir = dma_alloc_coherent(&dev->pdev->dev,
+ (SZ_1K * sizeof(*loopback_ddr_vir)),
+ &loopback_ddr_phy, GFP_KERNEL);
+ if (!loopback_ddr_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: failed to dma_alloc_coherent.\n",
+ dev->rc_idx);
+ } else {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
+ dev->rc_idx, loopback_ddr_vir);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PHY DDR memory address: %pad\n",
+ dev->rc_idx, &loopback_ddr_phy);
+ }
+
+ PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
+ loopback_lbar_vir = devm_ioremap(&dev->pdev->dev,
+ loopback_lbar_phy, SZ_4K);
+ if (!loopback_lbar_vir) {
+ PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
+ } else {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: successfully mapped %pa to 0x%pK\n",
+ dev->rc_idx, &loopback_lbar_phy,
+ loopback_lbar_vir);
+ }
+ break;
+ case MSM_PCIE_FREE_DDR_UNMAP_LBAR:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Release 4K DDR memory and unmap LBAR.\n",
+ dev->rc_idx);
+
+ if (loopback_ddr_vir) {
+ dma_free_coherent(&dev->pdev->dev, SZ_4K,
+ loopback_ddr_vir, loopback_ddr_phy);
+ loopback_ddr_vir = NULL;
+ }
+
+ if (loopback_lbar_vir) {
+ devm_iounmap(&dev->pdev->dev,
+ loopback_lbar_vir);
+ loopback_lbar_vir = NULL;
+ }
+ break;
+ case MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Print DDR and LBAR addresses.\n",
+ dev->rc_idx);
+
+ if (!loopback_ddr_vir || !loopback_lbar_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: DDR or LBAR address is not mapped\n",
+ dev->rc_idx);
+ break;
+ }
+
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PHY DDR address: %pad\n",
+ dev->rc_idx, &loopback_ddr_phy);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: VIR DDR address: 0x%pK\n",
+ dev->rc_idx, loopback_ddr_vir);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PHY LBAR address: %pa\n",
+ dev->rc_idx, &loopback_lbar_phy);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: VIR LBAR address: 0x%pK\n",
+ dev->rc_idx, loopback_lbar_vir);
+ break;
+ case MSM_PCIE_CONFIGURE_LOOPBACK:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Configure Loopback.\n",
+ dev->rc_idx);
+
+ writel_relaxed(0x10000,
+ dev->dm_core + PCIE_GEN3_RELATED);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: 0x%x: 0x%x\n",
+ dev->rc_idx,
+ dbi_base_addr + PCIE_GEN3_RELATED,
+ readl_relaxed(dev->dm_core +
+ PCIE_GEN3_RELATED));
+
+ writel_relaxed(0x80000001,
+ dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: 0x%x: 0x%x\n",
+ dev->rc_idx,
+ dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
+ readl_relaxed(dev->dm_core +
+ PCIE20_PIPE_LOOPBACK_CONTROL));
+
+ writel_relaxed(0x00010124,
+ dev->dm_core + PCIE20_PORT_LINK_CTRL_REG);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: 0x%x: 0x%x\n",
+ dev->rc_idx,
+ dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
+ readl_relaxed(dev->dm_core +
+ PCIE20_PORT_LINK_CTRL_REG));
+ break;
+ case MSM_PCIE_SETUP_LOOPBACK_IATU:
+ {
+ void __iomem *iatu_base_vir;
+ u32 iatu_base_phy;
+ u32 iatu_viewport_offset;
+ u32 iatu_ctrl1_offset;
+ u32 iatu_ctrl2_offset;
+ u32 iatu_lbar_offset;
+ u32 iatu_ubar_offset;
+ u32 iatu_lar_offset;
+ u32 iatu_ltar_offset;
+ u32 iatu_utar_offset;
+ u32 iatu_n = 1;
+
+ if (dev->iatu) {
+ iatu_base_vir = dev->iatu;
+ iatu_base_phy =
+ dev->res[MSM_PCIE_RES_IATU].resource->start;
+
+ iatu_viewport_offset = 0;
+ iatu_ctrl1_offset = PCIE_IATU_CTRL1(iatu_n);
+ iatu_ctrl2_offset = PCIE_IATU_CTRL2(iatu_n);
+ iatu_lbar_offset = PCIE_IATU_LBAR(iatu_n);
+ iatu_ubar_offset = PCIE_IATU_UBAR(iatu_n);
+ iatu_lar_offset = PCIE_IATU_LAR(iatu_n);
+ iatu_ltar_offset = PCIE_IATU_LTAR(iatu_n);
+ iatu_utar_offset = PCIE_IATU_UTAR(iatu_n);
+ } else {
+ iatu_base_vir = dev->dm_core;
+ iatu_base_phy = dbi_base_addr;
+
+ iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
+ iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
+ iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
+ iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
+ iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
+ iatu_lar_offset = PCIE20_PLR_IATU_LAR;
+ iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
+ iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
+ }
+
+ PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);
+
+ if (!loopback_ddr_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: DDR address is not mapped.\n",
+ dev->rc_idx);
+ break;
+ }
+
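+		/*
+		 * Program iATU region 1 to translate the LBAR window
+		 * (loopback_lbar_phy .. +0xfff) to the DDR loopback buffer at
+		 * loopback_ddr_phy, then enable the region via bit 31 of CTRL2.
+		 */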
+ if (iatu_viewport_offset) {
+ writel_relaxed(0x0, iatu_base_vir +
+ iatu_viewport_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%x: 0x%x\n",
+ dev->rc_idx,
+ iatu_base_phy + iatu_viewport_offset,
+ readl_relaxed(iatu_base_vir +
+ iatu_viewport_offset));
+ }
+
+ writel_relaxed(0x0, iatu_base_vir + iatu_ctrl1_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_ctrl1_offset,
+ readl_relaxed(iatu_base_vir + iatu_ctrl1_offset));
+
+ writel_relaxed(loopback_lbar_phy,
+ iatu_base_vir + iatu_lbar_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_lbar_offset,
+ readl_relaxed(iatu_base_vir + iatu_lbar_offset));
+
+ writel_relaxed(0x0, iatu_base_vir + iatu_ubar_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_ubar_offset,
+ readl_relaxed(iatu_base_vir + iatu_ubar_offset));
+
+ writel_relaxed(loopback_lbar_phy + 0xfff,
+ iatu_base_vir + iatu_lar_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_lar_offset,
+ readl_relaxed(iatu_base_vir + iatu_lar_offset));
+
+ writel_relaxed(loopback_ddr_phy,
+ iatu_base_vir + iatu_ltar_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_ltar_offset,
+ readl_relaxed(iatu_base_vir + iatu_ltar_offset));
+
+ writel_relaxed(0, iatu_base_vir + iatu_utar_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_utar_offset,
+ readl_relaxed(iatu_base_vir + iatu_utar_offset));
+
+ writel_relaxed(0x80000000,
+ iatu_base_vir + iatu_ctrl2_offset);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
+ dev->rc_idx, iatu_base_phy + iatu_ctrl2_offset,
+ readl_relaxed(iatu_base_vir + iatu_ctrl2_offset));
+ break;
+ }
+ case MSM_PCIE_READ_DDR:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Read DDR values.\n",
+ dev->rc_idx);
+
+ if (!loopback_ddr_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: DDR is not mapped\n",
+ dev->rc_idx);
+ break;
+ }
+
+ for (i = 0; i < SZ_1K; i += 8) {
+ PCIE_DBG_FS(dev,
+ "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ loopback_ddr_vir[i],
+ loopback_ddr_vir[i + 1],
+ loopback_ddr_vir[i + 2],
+ loopback_ddr_vir[i + 3],
+ loopback_ddr_vir[i + 4],
+ loopback_ddr_vir[i + 5],
+ loopback_ddr_vir[i + 6],
+ loopback_ddr_vir[i + 7]);
+ }
+ break;
+ case MSM_PCIE_READ_LBAR:
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Read LBAR values.\n",
+ dev->rc_idx);
+
+ if (!loopback_lbar_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: LBAR address is not mapped\n",
+ dev->rc_idx);
+ break;
+ }
+
+ for (i = 0; i < SZ_4K; i += 32) {
+ PCIE_DBG_FS(dev,
+ "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ readl_relaxed(loopback_lbar_vir + i),
+ readl_relaxed(loopback_lbar_vir + (i + 4)),
+ readl_relaxed(loopback_lbar_vir + (i + 8)),
+ readl_relaxed(loopback_lbar_vir + (i + 12)),
+ readl_relaxed(loopback_lbar_vir + (i + 16)),
+ readl_relaxed(loopback_lbar_vir + (i + 20)),
+ readl_relaxed(loopback_lbar_vir + (i + 24)),
+ readl_relaxed(loopback_lbar_vir + (i + 28)));
+ }
+ break;
+ case MSM_PCIE_WRITE_DDR:
+ PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to DDR.\n",
+ dev->rc_idx, loopback_val);
+
+ if (!loopback_ddr_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: DDR address is not mapped\n",
+ dev->rc_idx);
+ break;
+ }
+
+ memset(loopback_ddr_vir, loopback_val,
+ (SZ_1K * sizeof(*loopback_ddr_vir)));
+
+ if (unlikely(loopback_val == UINT_MAX))
+ loopback_val = 1;
+ else
+ loopback_val++;
+ break;
+ case MSM_PCIE_WRITE_LBAR:
+ PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to LBAR.\n",
+ dev->rc_idx, loopback_val);
+
+ if (!loopback_lbar_vir) {
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: LBAR address is not mapped\n",
+ dev->rc_idx);
+ break;
+ }
+
+ for (i = 0; i < SZ_4K; i += 32) {
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + i);
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 4));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 8));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 12));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 16));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 20));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 24));
+			writel_relaxed(loopback_val,
+				loopback_lbar_vir + (i + 28));
+ }
+
+ if (unlikely(loopback_val == UINT_MAX))
+ loopback_val = 1;
+ else
+ loopback_val++;
+ break;
+ case MSM_PCIE_DISABLE_AER:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: clear AER enable flag\n\n",
+ dev->rc_idx);
+ dev->aer_enable = false;
+ break;
+ case MSM_PCIE_ENABLE_AER:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set AER enable flag\n\n",
+ dev->rc_idx);
+ dev->aer_enable = true;
+ break;
+ case MSM_PCIE_GPIO_STATUS:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: PERST and WAKE status\n\n",
+ dev->rc_idx);
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: PERST: gpio%u value: %d\n",
+ dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ gpio_get_value(dev->gpio[MSM_PCIE_GPIO_PERST].num));
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: WAKE: gpio%u value: %d\n",
+ dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_WAKE].num,
+ gpio_get_value(dev->gpio[MSM_PCIE_GPIO_WAKE].num));
+ break;
+ case MSM_PCIE_ASSERT_PERST:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+ break;
+ case MSM_PCIE_DEASSERT_PERST:
+ PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+ break;
+ case MSM_PCIE_KEEP_RESOURCES_ON:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: set keep resources on flag\n\n",
+ dev->rc_idx);
+ msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
+ break;
+ case MSM_PCIE_FORCE_GEN1:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: set target speed to Gen 1\n\n",
+ dev->rc_idx);
+ dev->target_link_speed = GEN1_SPEED;
+ break;
+ case MSM_PCIE_FORCE_GEN2:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: set target speed to Gen 2\n\n",
+ dev->rc_idx);
+ dev->target_link_speed = GEN2_SPEED;
+ break;
+ case MSM_PCIE_FORCE_GEN3:
+ PCIE_DBG_FS(dev,
+ "\n\nPCIe: RC%d: set target speed to Gen 3\n\n",
+ dev->rc_idx);
+ dev->target_link_speed = GEN3_SPEED;
+ break;
+ default:
+ PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
+ break;
+ }
+}
+
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+ u32 offset, u32 mask, u32 value)
+{
+ int ret = 0;
+ struct msm_pcie_dev_t *pdev = NULL;
+
+ if (!dev) {
+ pr_err("PCIe: the input pci dev is NULL.\n");
+ return -ENODEV;
+ }
+
+	/* look up the RC before any logging so PCIE_DBG_FS has a valid device */
+	pdev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+ option == MSM_PCIE_WRITE_PCIE_REGISTER ||
+ option == MSM_PCIE_DUMP_PCIE_REGISTER_SPACE) {
+ if (!base || base >= MSM_PCIE_MAX_RES) {
+ PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
+ PCIE_DBG_FS(pdev,
+ "PCIe: base_sel is still 0x%x\n", base_sel);
+ return -EINVAL;
+ }
+
+ base_sel = base;
+ PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
+
+ if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+ option == MSM_PCIE_WRITE_PCIE_REGISTER) {
+ wr_offset = offset;
+ wr_mask = mask;
+ wr_value = value;
+
+ PCIE_DBG_FS(pdev,
+ "PCIe: wr_offset is now 0x%x\n", wr_offset);
+ PCIE_DBG_FS(pdev,
+ "PCIe: wr_mask is now 0x%x\n", wr_mask);
+ PCIE_DBG_FS(pdev,
+ "PCIe: wr_value is now 0x%x\n", wr_value);
+ }
+ }
+
+	rc_sel = BIT(pdev->rc_idx);
+
+ msm_pcie_sel_debug_testcase(pdev, option);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_debug_info);
+
+#ifdef CONFIG_SYSFS
+static ssize_t enumerate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
+ dev_get_drvdata(dev);
+
+ if (pcie_dev)
+ msm_pcie_enumerate(pcie_dev->rc_idx);
+
+ return count;
+}
+static DEVICE_ATTR_WO(enumerate);
+
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+ int ret;
+
+ ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
+ if (ret)
+ PCIE_DBG_FS(dev,
+ "RC%d: failed to create sysfs enumerate node\n",
+ dev->rc_idx);
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+ if (dev->pdev)
+ device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
+}
+#else
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent_msm_pcie;
+static struct dentry *dfile_rc_sel;
+static struct dentry *dfile_case;
+static struct dentry *dfile_base_sel;
+static struct dentry *dfile_linkdown_panic;
+static struct dentry *dfile_wr_offset;
+static struct dentry *dfile_wr_mask;
+static struct dentry *dfile_wr_value;
+static struct dentry *dfile_boot_option;
+static struct dentry *dfile_aer_enable;
+static struct dentry *dfile_corr_counter_limit;
+
+static u32 rc_sel_max;
+
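+/* copy a user-space string and parse a single integer option from it */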
+static int msm_pcie_debugfs_parse_input(const char __user *buf,
+ size_t count, unsigned int *data)
+{
+ unsigned long ret;
+ char *str, *str_temp;
+
+ str = kmalloc(count + 1, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ ret = copy_from_user(str, buf, count);
+ if (ret) {
+ kfree(str);
+ return -EFAULT;
+ }
+
+ str[count] = 0;
+ str_temp = str;
+
+ ret = get_option(&str_temp, data);
+ kfree(str);
+ if (ret != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int msm_pcie_debugfs_case_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0; i < MSM_PCIE_MAX_DEBUGFS_OPTION; i++)
+ seq_printf(m, "\t%d:\t %s\n", i,
+ msm_pcie_debugfs_option_desc[i]);
+
+ return 0;
+}
+
+static int msm_pcie_debugfs_case_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_pcie_debugfs_case_show, NULL);
+}
+
+static ssize_t msm_pcie_debugfs_case_select(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int i, ret;
+ unsigned int testcase = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &testcase);
+ if (ret)
+ return ret;
+
+ pr_alert("PCIe: TEST: %d\n", testcase);
+
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ if (rc_sel & BIT(i))
+ msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
+ }
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_case_ops = {
+ .open = msm_pcie_debugfs_case_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = msm_pcie_debugfs_case_select,
+};
+
+static ssize_t msm_pcie_debugfs_rc_select(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int i, ret;
+ u32 new_rc_sel = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_rc_sel);
+ if (ret)
+ return ret;
+
+ if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
+ pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
+ pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
+ } else {
+ rc_sel = new_rc_sel;
+ pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
+ }
+
+ pr_alert("PCIe: the following RC(s) will be tested:\n");
+ for (i = 0; i < MAX_RC_NUM; i++)
+ if (rc_sel & BIT(i))
+ pr_alert("RC %d\n", i);
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_rc_select_ops = {
+ .write = msm_pcie_debugfs_rc_select,
+};
+
+static ssize_t msm_pcie_debugfs_base_select(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ u32 new_base_sel = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_base_sel);
+ if (ret)
+ return ret;
+
+ if (!new_base_sel || new_base_sel > MSM_PCIE_MAX_RES) {
+ pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
+ new_base_sel);
+ pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
+ } else {
+ base_sel = new_base_sel;
+ pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
+ pr_alert("%s\n", msm_pcie_res_info[base_sel - 1].name);
+ }
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_base_select_ops = {
+ .write = msm_pcie_debugfs_base_select,
+};
+
+static ssize_t msm_pcie_debugfs_linkdown_panic(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int i, ret;
+ u32 new_linkdown_panic = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_linkdown_panic);
+ if (ret)
+ return ret;
+
+ new_linkdown_panic = !!new_linkdown_panic;
+
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ if (rc_sel & BIT(i)) {
+ msm_pcie_dev[i].linkdown_panic =
+ new_linkdown_panic;
+ PCIE_DBG_FS(&msm_pcie_dev[i],
+ "PCIe: RC%d: linkdown_panic is now %d\n",
+ i, msm_pcie_dev[i].linkdown_panic);
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_linkdown_panic_ops = {
+ .write = msm_pcie_debugfs_linkdown_panic,
+};
+
+static ssize_t msm_pcie_debugfs_wr_offset(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ wr_offset = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_offset);
+ if (ret)
+ return ret;
+
+ pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_wr_offset_ops = {
+ .write = msm_pcie_debugfs_wr_offset,
+};
+
+static ssize_t msm_pcie_debugfs_wr_mask(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ wr_mask = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_mask);
+ if (ret)
+ return ret;
+
+ pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_wr_mask_ops = {
+ .write = msm_pcie_debugfs_wr_mask,
+};
+static ssize_t msm_pcie_debugfs_wr_value(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ wr_value = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &wr_value);
+ if (ret)
+ return ret;
+
+ pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_wr_value_ops = {
+ .write = msm_pcie_debugfs_wr_value,
+};
+
+static ssize_t msm_pcie_debugfs_boot_option(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int i, ret;
+ u32 new_boot_option = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_boot_option);
+ if (ret)
+ return ret;
+
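+	/* only the two defined boot-option bits, BIT(0) and BIT(1), are accepted */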
+ if (new_boot_option <= (BIT(0) | BIT(1))) {
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ if (rc_sel & BIT(i)) {
+ msm_pcie_dev[i].boot_option = new_boot_option;
+ PCIE_DBG_FS(&msm_pcie_dev[i],
+ "PCIe: RC%d: boot_option is now 0x%x\n",
+ i, msm_pcie_dev[i].boot_option);
+ }
+ }
+ } else {
+ pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
+ new_boot_option);
+ }
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_boot_option_ops = {
+ .write = msm_pcie_debugfs_boot_option,
+};
+
+static ssize_t msm_pcie_debugfs_aer_enable(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int i, ret;
+ u32 new_aer_enable = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &new_aer_enable);
+ if (ret)
+ return ret;
+
+ new_aer_enable = !!new_aer_enable;
+
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ if (rc_sel & BIT(i)) {
+ msm_pcie_dev[i].aer_enable = new_aer_enable;
+ PCIE_DBG_FS(&msm_pcie_dev[i],
+ "PCIe: RC%d: aer_enable is now %d\n",
+ i, msm_pcie_dev[i].aer_enable);
+
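+			/*
+			 * BIT(16) of the bridge control dword is cleared when
+			 * AER is disabled and set when it is enabled.
+			 */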
+ msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
+ PCIE20_BRIDGE_CTRL,
+ new_aer_enable ? 0 : BIT(16),
+ new_aer_enable ? BIT(16) : 0);
+
+ PCIE_DBG_FS(&msm_pcie_dev[i],
+ "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
+ readl_relaxed(msm_pcie_dev[i].dm_core +
+ PCIE20_BRIDGE_CTRL));
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_aer_enable_ops = {
+ .write = msm_pcie_debugfs_aer_enable,
+};
+
+static ssize_t msm_pcie_debugfs_corr_counter_limit(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ corr_counter_limit = 0;
+
+ ret = msm_pcie_debugfs_parse_input(buf, count, &corr_counter_limit);
+ if (ret)
+ return ret;
+
+ pr_info("PCIe: corr_counter_limit is now %u\n", corr_counter_limit);
+
+ return count;
+}
+
+static const struct file_operations msm_pcie_debugfs_corr_counter_limit_ops = {
+ .write = msm_pcie_debugfs_corr_counter_limit,
+};
+
+static void msm_pcie_debugfs_init(void)
+{
+ rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
+ wr_mask = 0xffffffff;
+
+ dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
+ if (IS_ERR(dent_msm_pcie)) {
+ pr_err("PCIe: fail to create the folder for debug_fs.\n");
+ return;
+ }
+
+ dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_rc_select_ops);
+ if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
+ pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
+ goto rc_sel_error;
+ }
+
+ dfile_case = debugfs_create_file("case", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_case_ops);
+ if (!dfile_case || IS_ERR(dfile_case)) {
+ pr_err("PCIe: fail to create the file for debug_fs case.\n");
+ goto case_error;
+ }
+
+ dfile_base_sel = debugfs_create_file("base_sel", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_base_select_ops);
+ if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
+ pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
+ goto base_sel_error;
+ }
+
+ dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_linkdown_panic_ops);
+ if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
+ pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
+ goto linkdown_panic_error;
+ }
+
+ dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_wr_offset_ops);
+ if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
+ pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
+ goto wr_offset_error;
+ }
+
+ dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_wr_mask_ops);
+ if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
+ pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
+ goto wr_mask_error;
+ }
+
+ dfile_wr_value = debugfs_create_file("wr_value", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_wr_value_ops);
+ if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
+ pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
+ goto wr_value_error;
+ }
+
+ dfile_boot_option = debugfs_create_file("boot_option", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_boot_option_ops);
+ if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
+ pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
+ goto boot_option_error;
+ }
+
+ dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
+ dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_aer_enable_ops);
+ if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
+ pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
+ goto aer_enable_error;
+ }
+
+ dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
+ 0664, dent_msm_pcie, NULL,
+ &msm_pcie_debugfs_corr_counter_limit_ops);
+ if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
+ pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
+ goto corr_counter_limit_error;
+ }
+ return;
+
+corr_counter_limit_error:
+ debugfs_remove(dfile_aer_enable);
+aer_enable_error:
+ debugfs_remove(dfile_boot_option);
+boot_option_error:
+ debugfs_remove(dfile_wr_value);
+wr_value_error:
+ debugfs_remove(dfile_wr_mask);
+wr_mask_error:
+ debugfs_remove(dfile_wr_offset);
+wr_offset_error:
+ debugfs_remove(dfile_linkdown_panic);
+linkdown_panic_error:
+ debugfs_remove(dfile_base_sel);
+base_sel_error:
+ debugfs_remove(dfile_case);
+case_error:
+ debugfs_remove(dfile_rc_sel);
+rc_sel_error:
+ debugfs_remove(dent_msm_pcie);
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+ debugfs_remove(dfile_rc_sel);
+ debugfs_remove(dfile_case);
+ debugfs_remove(dfile_base_sel);
+ debugfs_remove(dfile_linkdown_panic);
+ debugfs_remove(dfile_wr_offset);
+ debugfs_remove(dfile_wr_mask);
+ debugfs_remove(dfile_wr_value);
+ debugfs_remove(dfile_boot_option);
+ debugfs_remove(dfile_aer_enable);
+ debugfs_remove(dfile_corr_counter_limit);
+}
+#else
+static void msm_pcie_debugfs_init(void)
+{
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+}
+#endif
+
+static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
+{
+ return readl_relaxed(dev->dm_core +
+ PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
+}
+
+/**
+ * msm_pcie_iatu_config - configure outbound address translation region
+ * @dev: root complex
+ * @nr: region number
+ * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
+ * @host_addr: region start address on host
+ * @host_end: region end address (low 32 bits) on host,
+ *   upper 32 bits are the same as for @host_addr
+ * @target_addr: region start address on target
+ */
+static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
+ unsigned long host_addr, u32 host_end,
+ unsigned long target_addr)
+{
+ void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;
+
+ u32 iatu_viewport_offset;
+ u32 iatu_ctrl1_offset;
+ u32 iatu_ctrl2_offset;
+ u32 iatu_lbar_offset;
+ u32 iatu_ubar_offset;
+ u32 iatu_lar_offset;
+ u32 iatu_ltar_offset;
+ u32 iatu_utar_offset;
+
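+	/*
+	 * Controllers with a dedicated iATU space expose a register block per
+	 * region; otherwise regions are programmed through a single viewport
+	 * register in the DBI space.
+	 */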
+ if (dev->iatu) {
+ iatu_viewport_offset = 0;
+ iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
+ iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
+ iatu_lbar_offset = PCIE_IATU_LBAR(nr);
+ iatu_ubar_offset = PCIE_IATU_UBAR(nr);
+ iatu_lar_offset = PCIE_IATU_LAR(nr);
+ iatu_ltar_offset = PCIE_IATU_LTAR(nr);
+ iatu_utar_offset = PCIE_IATU_UTAR(nr);
+ } else {
+ iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
+ iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
+ iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
+ iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
+ iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
+ iatu_lar_offset = PCIE20_PLR_IATU_LAR;
+ iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
+ iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
+ }
+
+ if (dev->shadow_en && iatu_viewport_offset) {
+ dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
+ nr;
+ dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
+ type;
+ dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
+ lower_32_bits(host_addr);
+ dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
+ upper_32_bits(host_addr);
+ dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
+ host_end;
+ dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
+ lower_32_bits(target_addr);
+ dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
+ upper_32_bits(target_addr);
+ dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
+ BIT(31);
+ }
+
+ /* select region */
+ if (iatu_viewport_offset) {
+ writel_relaxed(nr, iatu_base + iatu_viewport_offset);
+		/* ensure the region selection reaches hardware before programming it */
+ wmb();
+ }
+
+ /* switch off region before changing it */
+ writel_relaxed(0, iatu_base + iatu_ctrl2_offset);
+ /* and wait till it propagates to the hardware */
+ wmb();
+
+ writel_relaxed(type, iatu_base + iatu_ctrl1_offset);
+ writel_relaxed(lower_32_bits(host_addr),
+ iatu_base + iatu_lbar_offset);
+ writel_relaxed(upper_32_bits(host_addr),
+ iatu_base + iatu_ubar_offset);
+ writel_relaxed(host_end, iatu_base + iatu_lar_offset);
+ writel_relaxed(lower_32_bits(target_addr),
+ iatu_base + iatu_ltar_offset);
+ writel_relaxed(upper_32_bits(target_addr),
+ iatu_base + iatu_utar_offset);
+ /* ensure that changes propagated to the hardware */
+ wmb();
+ writel_relaxed(BIT(31), iatu_base + iatu_ctrl2_offset);
+
+ /* ensure that changes propagated to the hardware */
+ wmb();
+
+ if (dev->enumerated) {
+ PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
+ dev->pcidev_table[nr].bdf >> 24,
+ dev->pcidev_table[nr].bdf >> 19 & 0x1f,
+ dev->pcidev_table[nr].bdf >> 16 & 0x07);
+ if (iatu_viewport_offset)
+ PCIE_DBG2(dev, "IATU_VIEWPORT:0x%x\n",
+ readl_relaxed(dev->dm_core +
+ PCIE20_PLR_IATU_VIEWPORT));
+ PCIE_DBG2(dev, "IATU_CTRL1:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ctrl1_offset));
+ PCIE_DBG2(dev, "IATU_LBAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_lbar_offset));
+ PCIE_DBG2(dev, "IATU_UBAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ubar_offset));
+ PCIE_DBG2(dev, "IATU_LAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_lar_offset));
+ PCIE_DBG2(dev, "IATU_LTAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_ltar_offset));
+ PCIE_DBG2(dev, "IATU_UTAR:0x%x\n",
+ readl_relaxed(iatu_base + iatu_utar_offset));
+ PCIE_DBG2(dev, "IATU_CTRL2:0x%x\n\n",
+ readl_relaxed(iatu_base + iatu_ctrl2_offset));
+ }
+}
+
+/**
+ * msm_pcie_cfg_bdf - configure for config access
+ * @dev: root complex
+ * @bus: PCI bus number
+ * @devfn: PCI dev and function number
+ *
+ * Remap region 0, if required, for a config access of the proper type
+ * (CFG0 for bus 1, CFG1 for other buses) and cache the current device
+ * BDF to avoid reprogramming the iATU on every access.
+ */
+static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
+{
+ struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
+ u32 bdf = BDF_OFFSET(bus, devfn);
+ u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+ if (dev->current_bdf == bdf)
+ return;
+
+ msm_pcie_iatu_config(dev, 0, type,
+ axi_conf->start,
+ axi_conf->start + SZ_4K - 1,
+ bdf);
+
+ dev->current_bdf = bdf;
+}
+
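+/*
+ * Cache a config-space write in the RC or endpoint shadow so the register
+ * value can be replayed when config space needs to be restored; endpoint
+ * writes also record previously unseen BDFs in the device tables.
+ */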
+static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
+ u32 word_offset, u32 wr_val,
+ u32 bdf, bool rc)
+{
+ int i, j;
+ u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
+
+ if (rc) {
+ dev->rc_shadow[word_offset / 4] = wr_val;
+ } else {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (!dev->pcidev_table[i].bdf) {
+ for (j = 0; j < max_dev; j++)
+ if (!msm_pcie_dev_tbl[j].bdf) {
+ msm_pcie_dev_tbl[j].bdf = bdf;
+ break;
+ }
+ dev->pcidev_table[i].bdf = bdf;
+ if ((!dev->bridge_found) && (i > 0))
+ dev->bridge_found = true;
+ }
+ if (dev->pcidev_table[i].bdf == bdf) {
+ dev->ep_shadow[i][word_offset / 4] = wr_val;
+ break;
+ }
+ }
+ }
+}
+
+static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
+ int where, int size, u32 *val)
+{
+ uint32_t word_offset, byte_offset, mask;
+ uint32_t rd_val, wr_val;
+ struct msm_pcie_dev_t *dev;
+ void __iomem *config_base;
+ bool rc = false;
+ u32 rc_idx;
+ int rv = 0;
+ u32 bdf = BDF_OFFSET(bus->number, devfn);
+ int i;
+
+ dev = PCIE_BUS_PRIV_DATA(bus);
+
+ if (!dev) {
+ pr_err("PCIe: No device found for this bus.\n");
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto out;
+ }
+
+ rc_idx = dev->rc_idx;
+ rc = (bus->number == 0);
+
+ spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
+
+ if (!dev->cfg_access) {
+ PCIE_DBG3(dev,
+ "Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
+ rc_idx, bus->number, devfn, where, size);
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto unlock;
+ }
+
+ if (rc && (devfn != 0)) {
+ PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
+ (oper == RD) ? "rd" : "wr", bus->number, devfn);
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto unlock;
+ }
+
+ if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
+ PCIE_DBG3(dev,
+ "Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
+ rc_idx, bus->number, devfn, where, size);
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto unlock;
+ }
+
+ /* check if the link is up for endpoint */
+ if (!rc && !msm_pcie_is_link_up(dev)) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
+ rc_idx, (oper == RD) ? "rd" : "wr",
+ bus->number, devfn);
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto unlock;
+ }
+
+ if (!rc && !dev->enumerated)
+ msm_pcie_cfg_bdf(dev, bus->number, devfn);
+
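+	/*
+	 * Config space is accessed as aligned 32-bit words; build a byte-lane
+	 * mask that covers 'size' bytes starting at 'where'.
+	 */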
+ word_offset = where & ~0x3;
+ byte_offset = where & 0x3;
+ mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);
+
+ if (rc || !dev->enumerated) {
+ config_base = rc ? dev->dm_core : dev->conf;
+ } else {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (dev->pcidev_table[i].bdf == bdf) {
+ config_base = dev->pcidev_table[i].conf_base;
+ break;
+ }
+ }
+ if (i == MAX_DEVICE_NUM) {
+ *val = ~0;
+ rv = PCIBIOS_DEVICE_NOT_FOUND;
+ goto unlock;
+ }
+ }
+
+ rd_val = readl_relaxed(config_base + word_offset);
+
+ if (oper == RD) {
+ *val = ((rd_val & mask) >> (8 * byte_offset));
+ PCIE_DBG3(dev,
+ "RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
+ rc_idx, bus->number, devfn, where, size, *val, rd_val);
+ } else {
+ wr_val = (rd_val & ~mask) |
+ ((*val << (8 * byte_offset)) & mask);
+
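+		/*
+		 * A root-port write to the dword at 0x3c also forces Parity
+		 * Error Response Enable and SERR# Enable (bits 16 and 17 of
+		 * the Bridge Control field).
+		 */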
+ if ((bus->number == 0) && (where == 0x3c))
+ wr_val = wr_val | (3 << 16);
+
+ writel_relaxed(wr_val, config_base + word_offset);
+ wmb(); /* ensure config data is written to hardware register */
+
+ if (dev->shadow_en) {
+ if (rd_val == PCIE_LINK_DOWN &&
+ (readl_relaxed(config_base) == PCIE_LINK_DOWN))
+ PCIE_ERR(dev,
+ "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
+ rc_idx, bus->number, devfn,
+ where, size);
+ else
+ msm_pcie_save_shadow(dev, word_offset, wr_val,
+ bdf, rc);
+ }
+
+ PCIE_DBG3(dev,
+ "RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
+ rc_idx, bus->number, devfn, where, size,
+ wr_val, rd_val, *val);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
+out:
+ return rv;
+}
+
+static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
+
+ if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
+ *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
+ PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
+ }
+
+ return ret;
+}
+
+static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
+}
+
+static struct pci_ops msm_pcie_ops = {
+ .read = msm_pcie_rd_conf,
+ .write = msm_pcie_wr_conf,
+};
+
+static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
+{
+ int rc = 0, i;
+ struct msm_pcie_gpio_info_t *info;
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ for (i = 0; i < dev->gpio_n; i++) {
+ info = &dev->gpio[i];
+
+ if (!info->num)
+ continue;
+
+ rc = gpio_request(info->num, info->name);
+ if (rc) {
+ PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+
+ if (info->out)
+ rc = gpio_direction_output(info->num, info->init);
+ else
+ rc = gpio_direction_input(info->num);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set direction for GPIO %s:%d\n",
+ dev->rc_idx, info->name, rc);
+ gpio_free(info->num);
+ break;
+ }
+ }
+
+ if (rc)
+ while (i--)
+ gpio_free(dev->gpio[i].num);
+
+ return rc;
+}
+
+static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
+{
+ int i;
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ for (i = 0; i < dev->gpio_n; i++)
+ gpio_free(dev->gpio[i].num);
+}
+
+static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct regulator *vreg;
+ struct msm_pcie_vreg_info_t *info;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+ info = &dev->vreg[i];
+ vreg = info->hdl;
+
+ if (!vreg)
+ continue;
+
+ PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
+ dev->rc_idx, info->name);
+ if (info->max_v) {
+ rc = regulator_set_voltage(vreg,
+ info->min_v, info->max_v);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set voltage for %s: %d\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+ }
+
+ if (info->opt_mode) {
+ rc = regulator_set_load(vreg, info->opt_mode);
+ if (rc < 0) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set mode for %s: %d\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+ }
+
+ rc = regulator_enable(vreg);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't enable regulator %s: %d\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+ }
+
+ if (rc)
+ while (i--) {
+ struct regulator *hdl = dev->vreg[i].hdl;
+
+ if (hdl) {
+ regulator_disable(hdl);
+ if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+ PCIE_DBG(dev,
+ "RC%d: Removing %s vote.\n",
+ dev->rc_idx,
+ dev->vreg[i].name);
+ regulator_set_voltage(hdl,
+ RPMH_REGULATOR_LEVEL_RETENTION,
+ RPMH_REGULATOR_LEVEL_MAX);
+ }
+
+ if (dev->vreg[i].opt_mode) {
+ rc = regulator_set_load(hdl, 0);
+ if (rc < 0)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set mode for %s: %d\n",
+ dev->rc_idx,
+ dev->vreg[i].name, rc);
+ }
+ }
+
+ }
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+ return rc;
+}
+
+static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
+{
+ int i, ret;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
+ if (dev->vreg[i].hdl) {
+ PCIE_DBG(dev, "Vreg %s is being disabled\n",
+ dev->vreg[i].name);
+ regulator_disable(dev->vreg[i].hdl);
+
+ if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+ PCIE_DBG(dev,
+ "RC%d: Removing %s vote.\n",
+ dev->rc_idx,
+ dev->vreg[i].name);
+ regulator_set_voltage(dev->vreg[i].hdl,
+ RPMH_REGULATOR_LEVEL_RETENTION,
+ RPMH_REGULATOR_LEVEL_MAX);
+ }
+
+ if (dev->vreg[i].opt_mode) {
+ ret = regulator_set_load(dev->vreg[i].hdl, 0);
+ if (ret < 0)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set mode for %s: %d\n",
+ dev->rc_idx, dev->vreg[i].name,
+ ret);
+ }
+ }
+ }
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct msm_pcie_clk_info_t *info;
+ struct msm_pcie_reset_info_t *reset_info;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ rc = regulator_enable(dev->gdsc);
+
+ if (rc) {
+ PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
+ dev->rc_idx, dev->pdev->name);
+ return rc;
+ }
+
+ PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
+ if (dev->bus_client) {
+ rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
+ dev->rc_idx, rc);
+ return rc;
+ }
+
+ PCIE_DBG2(dev,
+ "PCIe: set bus bandwidth for RC%d.\n",
+ dev->rc_idx);
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+ info = &dev->clk[i];
+
+ if (!info->hdl)
+ continue;
+
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
+ if (info->freq) {
+ rc = clk_set_rate(info->hdl, info->freq);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set rate for clk %s: %d.\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+
+ PCIE_DBG2(dev,
+ "PCIe: RC%d set rate for clk %s.\n",
+ dev->rc_idx, info->name);
+ }
+
+ rc = clk_prepare_enable(info->hdl);
+
+ if (rc)
+ PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
+ dev->rc_idx, info->name);
+ else
+ PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
+ info->name, dev->rc_idx);
+ }
+
+ if (rc) {
+ PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
+ dev->rc_idx);
+ while (i--) {
+ struct clk *hdl = dev->clk[i].hdl;
+
+ if (hdl)
+ clk_disable_unprepare(hdl);
+ }
+
+ regulator_disable(dev->gdsc);
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+ reset_info = &dev->reset[i];
+ if (reset_info->hdl) {
+ rc = reset_control_assert(reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to assert reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully asserted reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+
+ /* add a 1ms delay to ensure the reset is asserted */
+ usleep_range(1000, 1005);
+
+ rc = reset_control_deassert(reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to deassert reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully deasserted reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+ }
+ }
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+ return rc;
+}
+
+static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+ int i;
+ int rc;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
+ if (dev->clk[i].hdl)
+ clk_disable_unprepare(dev->clk[i].hdl);
+
+ if (dev->bus_client) {
+ PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
+ dev->rc_idx);
+
+ rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
+ dev->rc_idx, rc);
+ else
+ PCIE_DBG(dev,
+ "PCIe: relinquish bus bandwidth for RC%d.\n",
+ dev->rc_idx);
+ }
+
+ regulator_disable(dev->gdsc);
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct msm_pcie_clk_info_t *info;
+ struct msm_pcie_reset_info_t *pipe_reset_info;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+ info = &dev->pipeclk[i];
+
+ if (!info->hdl)
+ continue;
+
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
+ if (info->freq) {
+ rc = clk_set_rate(info->hdl, info->freq);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't set rate for clk %s: %d.\n",
+ dev->rc_idx, info->name, rc);
+ break;
+ }
+
+ PCIE_DBG2(dev,
+ "PCIe: RC%d set rate for clk %s: %d.\n",
+ dev->rc_idx, info->name, rc);
+ }
+
+ rc = clk_prepare_enable(info->hdl);
+
+ if (rc)
+ PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
+ dev->rc_idx, info->name);
+ else
+ PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
+ dev->rc_idx, info->name);
+ }
+
+ if (rc) {
+ PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
+ dev->rc_idx);
+ while (i--)
+ if (dev->pipeclk[i].hdl)
+ clk_disable_unprepare(dev->pipeclk[i].hdl);
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+ pipe_reset_info = &dev->pipe_reset[i];
+ if (pipe_reset_info->hdl) {
+ rc = reset_control_assert(pipe_reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to assert pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully asserted pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+
+ /* add a 1ms delay to ensure the reset is asserted */
+ usleep_range(1000, 1005);
+
+ rc = reset_control_deassert(
+ pipe_reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to deassert pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+ }
+ }
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+ return rc;
+}
+
+static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+ int i;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
+ if (dev->pipeclk[i].hdl)
+ clk_disable_unprepare(
+ dev->pipeclk[i].hdl);
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+ /* There is no PHY status check in RUMI */
+ if (dev->rumi)
+ return true;
+
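+	/* the PHY is ready once the status bit (BIT(6)) de-asserts */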
+ if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
+ return false;
+ else
+ return true;
+}
+
+static int pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+ int i, ret;
+ long retries = 0;
+ struct msm_pcie_phy_info_t *phy_seq;
+
+ PCIE_DBG(dev, "PCIe: RC%d: Initializing PHY\n", dev->rc_idx);
+
+ if (dev->phy_sequence) {
+ i = dev->phy_len;
+ phy_seq = dev->phy_sequence;
+ while (i--) {
+ msm_pcie_write_reg(dev->phy,
+ phy_seq->offset,
+ phy_seq->val);
+ if (phy_seq->delay)
+ usleep_range(phy_seq->delay,
+ phy_seq->delay + 1);
+ phy_seq++;
+ }
+ }
+
+ usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
+ PHY_STABILIZATION_DELAY_US_MAX);
+
+ /* Enable the pipe clock */
+ ret = msm_pcie_pipe_clk_init(dev);
+
+ /* ensure that changes propagated to the hardware */
+ wmb();
+
+ PCIE_DBG(dev, "PCIe RC%d: waiting for phy ready...\n", dev->rc_idx);
+ do {
+ if (pcie_phy_is_ready(dev))
+ break;
+ retries++;
+ usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
+ REFCLK_STABILIZATION_DELAY_US_MAX);
+ } while (retries < PHY_READY_TIMEOUT_COUNT);
+
+ PCIE_DBG(dev, "PCIe: RC%d: number of PHY retries: %ld.\n", dev->rc_idx,
+ retries);
+
+ if (!pcie_phy_is_ready(dev)) {
+ PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
+ dev->rc_idx);
+ pcie_phy_dump(dev);
+ return -ENODEV;
+ }
+
+ PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
+
+ return 0;
+}
+
+static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
+{
+ int i;
+ u8 type;
+ struct msm_pcie_device_info *dev_table = dev->pcidev_table;
+
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (!dev_table[i].bdf)
+ break;
+
+ type = dev_table[i].bdf >> 24 == 0x1 ?
+ PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+ msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
+ dev_table[i].phy_address + SZ_4K - 1,
+ dev_table[i].bdf);
+ }
+}
+
+static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
+{
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ /*
+	 * Program and enable outbound address translation region 0 (device
+	 * config address space): region type CFG, mapping the AXI config
+	 * address range onto the device config address range.
+ */
+ if (dev->enumerated) {
+ msm_pcie_iatu_config_all_ep(dev);
+ } else {
+ dev->current_bdf = 0; /* to force IATU re-config */
+ msm_pcie_cfg_bdf(dev, 1, 0);
+ }
+
+ /* configure N_FTS */
+ PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+ if (!dev->n_fts)
+ msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+ 0, BIT(15));
+ else
+ msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+ PCIE20_ACK_N_FTS,
+ dev->n_fts << 8);
+
+ if (dev->shadow_en)
+ dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
+ readl_relaxed(dev->dm_core +
+ PCIE20_ACK_F_ASPM_CTRL_REG);
+
+ PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+
+ /* configure AUX clock frequency register for PCIe core */
+ if (dev->use_19p2mhz_aux_clk)
+ msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
+ else
+ msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);
+
+ /* configure the completion timeout value for PCIe core */
+ if (dev->cpl_timeout && dev->bridge_found)
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_DEVICE_CONTROL2_STATUS2,
+ 0xf, dev->cpl_timeout);
+
+ /* Enable AER on RC */
+ if (dev->aer_enable) {
+ msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
+ BIT(16)|BIT(17));
+ msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
+ BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+ PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
+ }
+}
+
+static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
+{
+ int i;
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ /* program MSI controller and enable all interrupts */
+ writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
+ writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
+
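+	/*
+	 * Unmask every vector: each bank of MSI interrupt-enable registers is
+	 * spaced 12 bytes apart.
+	 */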
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
+ writel_relaxed(~0, dev->dm_core +
+ PCIE20_MSI_CTRL_INTR_EN + (i * 12));
+
+ /* ensure that hardware is configured before proceeding */
+ wmb();
+}
+
+static int msm_pcie_get_clk(struct msm_pcie_dev_t *pcie_dev)
+{
+ int i, cnt, ret;
+ struct msm_pcie_clk_info_t *clk_info;
+ u32 *clkfreq = NULL;
+ struct platform_device *pdev = pcie_dev->pdev;
+
+ cnt = of_property_count_elems_of_size((&pdev->dev)->of_node,
+ "max-clock-frequency-hz", sizeof(u32));
+ if (cnt <= 0)
+ return -EINVAL;
+
+ clkfreq = devm_kcalloc(&pdev->dev, MSM_PCIE_MAX_CLK +
+ MSM_PCIE_MAX_PIPE_CLK, sizeof(*clkfreq), GFP_KERNEL);
+ if (!clkfreq)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "max-clock-frequency-hz", clkfreq, cnt);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: invalid max-clock-frequency-hz property %d\n",
+ pcie_dev->rc_idx, ret);
+ return ret;
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+ clk_info = &pcie_dev->clk[i];
+
+ clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+ if (IS_ERR(clk_info->hdl)) {
+ if (clk_info->required) {
+ PCIE_DBG(pcie_dev,
+ "Clock %s isn't available:%ld\n",
+ clk_info->name, PTR_ERR(clk_info->hdl));
+ return PTR_ERR(clk_info->hdl);
+ }
+
+ PCIE_DBG(pcie_dev, "Ignoring Clock %s\n",
+ clk_info->name);
+ clk_info->hdl = NULL;
+ } else {
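+			/*
+			 * The max-clock-frequency-hz array lists the
+			 * pipe-clock rates first; core-clock rates follow.
+			 */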
+ clk_info->freq = clkfreq[i + MSM_PCIE_MAX_PIPE_CLK];
+ PCIE_DBG(pcie_dev, "Freq of Clock %s is:%d\n",
+ clk_info->name, clk_info->freq);
+ }
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+ clk_info = &pcie_dev->pipeclk[i];
+
+ clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+ if (IS_ERR(clk_info->hdl)) {
+ if (clk_info->required) {
+ PCIE_DBG(pcie_dev,
+ "Clock %s isn't available:%ld\n",
+ clk_info->name, PTR_ERR(clk_info->hdl));
+ return PTR_ERR(clk_info->hdl);
+ }
+
+ PCIE_DBG(pcie_dev, "Ignoring Clock %s\n",
+ clk_info->name);
+ clk_info->hdl = NULL;
+ } else {
+ clk_info->freq = clkfreq[i];
+ PCIE_DBG(pcie_dev, "Freq of Clock %s is:%d\n",
+ clk_info->name, clk_info->freq);
+ }
+ }
+
+ return 0;
+}
+
+static int msm_pcie_get_vreg(struct msm_pcie_dev_t *pcie_dev)
+{
+ int i, len;
+ struct platform_device *pdev = pcie_dev->pdev;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+
+ for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+ struct msm_pcie_vreg_info_t *vreg_info = &pcie_dev->vreg[i];
+
+ vreg_info->hdl = devm_regulator_get(&pdev->dev,
+ vreg_info->name);
+
+ if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
+ PCIE_DBG(pcie_dev, "EPROBE_DEFER for VReg:%s\n",
+ vreg_info->name);
+ return PTR_ERR(vreg_info->hdl);
+ }
+
+ if (IS_ERR(vreg_info->hdl)) {
+ if (vreg_info->required) {
+ PCIE_DBG(pcie_dev, "Vreg %s doesn't exist\n",
+ vreg_info->name);
+ return PTR_ERR(vreg_info->hdl);
+ }
+
+ PCIE_DBG(pcie_dev, "Optional Vreg %s doesn't exist\n",
+ vreg_info->name);
+ vreg_info->hdl = NULL;
+ } else {
+ pcie_dev->vreg_n++;
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-voltage-level", vreg_info->name);
+ prop = of_get_property(pdev->dev.of_node,
+ prop_name, &len);
+ if (!prop || (len != (3 * sizeof(__be32)))) {
+ PCIE_DBG(pcie_dev, "%s %s property\n",
+ prop ? "invalid format" :
+ "no", prop_name);
+ } else {
+ vreg_info->max_v = be32_to_cpup(&prop[0]);
+ vreg_info->min_v = be32_to_cpup(&prop[1]);
+ vreg_info->opt_mode =
+ be32_to_cpup(&prop[2]);
+ }
+ }
+ }
+
+ pcie_dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
+
+ if (IS_ERR(pcie_dev->gdsc)) {
+ PCIE_ERR(pcie_dev, "PCIe: RC%d: Failed to get %s GDSC:%ld\n",
+ pcie_dev->rc_idx, pdev->name, PTR_ERR(pcie_dev->gdsc));
+ if (PTR_ERR(pcie_dev->gdsc) == -EPROBE_DEFER)
+ PCIE_DBG(pcie_dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
+ pdev->name);
+ return PTR_ERR(pcie_dev->gdsc);
+ }
+
+ return 0;
+}
+
+static int msm_pcie_get_reset(struct msm_pcie_dev_t *pcie_dev)
+{
+ int i;
+ struct msm_pcie_reset_info_t *reset_info;
+
+ for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+ reset_info = &pcie_dev->reset[i];
+ reset_info->hdl = devm_reset_control_get(&pcie_dev->pdev->dev,
+ reset_info->name);
+ if (IS_ERR(reset_info->hdl)) {
+ if (reset_info->required) {
+ PCIE_DBG(pcie_dev,
+ "Reset %s isn't available:%ld\n",
+ reset_info->name,
+ PTR_ERR(reset_info->hdl));
+
+ return PTR_ERR(reset_info->hdl);
+ }
+
+ PCIE_DBG(pcie_dev, "Ignoring Reset %s\n",
+ reset_info->name);
+ reset_info->hdl = NULL;
+ }
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+ reset_info = &pcie_dev->pipe_reset[i];
+ reset_info->hdl = devm_reset_control_get(&pcie_dev->pdev->dev,
+ reset_info->name);
+ if (IS_ERR(reset_info->hdl)) {
+ if (reset_info->required) {
+ PCIE_DBG(pcie_dev,
+ "Pipe Reset %s isn't available:%ld\n",
+ reset_info->name,
+ PTR_ERR(reset_info->hdl));
+ return PTR_ERR(reset_info->hdl);
+ }
+
+ PCIE_DBG(pcie_dev, "Ignoring Pipe Reset %s\n",
+ reset_info->name);
+ reset_info->hdl = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int msm_pcie_get_phy(struct msm_pcie_dev_t *pcie_dev)
+{
+ int ret, size = 0;
+ struct platform_device *pdev = pcie_dev->pdev;
+
+ of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
+ if (!size) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: phy sequence is not present in DT\n",
+ pcie_dev->rc_idx);
+ return 0;
+ }
+
+ pcie_dev->phy_sequence = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!pcie_dev->phy_sequence)
+ return -ENOMEM;
+
+ pcie_dev->phy_len = size / sizeof(*pcie_dev->phy_sequence);
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,phy-sequence",
+ (unsigned int *)pcie_dev->phy_sequence,
+ size / sizeof(pcie_dev->phy_sequence->offset));
+ if (ret) {
+ devm_kfree(&pdev->dev, pcie_dev->phy_sequence);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_pcie_get_iommu_map(struct msm_pcie_dev_t *pcie_dev)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ struct platform_device *pdev = pcie_dev->pdev;
+ int i, size = 0;
+
+ of_get_property(pdev->dev.of_node, "iommu-map", &size);
+ if (!size) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: iommu-map is not present in DT.\n",
+ pcie_dev->rc_idx);
+ return 0;
+ }
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(pdev->dev.of_node,
+ "iommu-map", (u32 *)map, size / sizeof(u32));
+
+ pcie_dev->sid_info_len = size / (sizeof(*map));
+ pcie_dev->sid_info = devm_kcalloc(&pdev->dev, pcie_dev->sid_info_len,
+ sizeof(*pcie_dev->sid_info), GFP_KERNEL);
+ if (!pcie_dev->sid_info) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
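+	/* PCIe SIDs are the SMMU SIDs relative to the RC's SMMU SID base */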
+ for (i = 0; i < pcie_dev->sid_info_len; i++) {
+ pcie_dev->sid_info[i].bdf = map[i].bdf;
+ pcie_dev->sid_info[i].smmu_sid = map[i].smmu_sid;
+ pcie_dev->sid_info[i].pcie_sid =
+ pcie_dev->sid_info[i].smmu_sid -
+ pcie_dev->smmu_sid_base;
+ }
+
+ kfree(map);
+
+ return 0;
+}
+
+static int msm_pcie_get_gpio(struct msm_pcie_dev_t *pcie_dev)
+{
+ int i, ret;
+
+ pcie_dev->gpio_n = 0;
+ for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
+ struct msm_pcie_gpio_info_t *gpio_info = &pcie_dev->gpio[i];
+
+ ret = of_get_named_gpio(pcie_dev->pdev->dev.of_node,
+ gpio_info->name, 0);
+ if (ret >= 0) {
+ gpio_info->num = ret;
+ pcie_dev->gpio_n++;
+ PCIE_DBG(pcie_dev, "GPIO num for %s is %d\n",
+ gpio_info->name, gpio_info->num);
+ } else {
+ if (gpio_info->required) {
+ PCIE_ERR(pcie_dev,
+ "Could not get required GPIO %s\n",
+ gpio_info->name);
+ return ret;
+ }
+
+ PCIE_DBG(pcie_dev, "Could not get optional GPIO %s\n",
+ gpio_info->name);
+ }
+ }
+
+ pcie_dev->wake_n = 0;
+ if (pcie_dev->gpio[MSM_PCIE_GPIO_WAKE].num)
+ pcie_dev->wake_n =
+ gpio_to_irq(pcie_dev->gpio[MSM_PCIE_GPIO_WAKE].num);
+
+ return 0;
+}
+
+static int msm_pcie_get_reg(struct msm_pcie_dev_t *pcie_dev)
+{
+ struct resource *res;
+ struct msm_pcie_res_info_t *res_info;
+ int i;
+
+ for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
+ res_info = &pcie_dev->res[i];
+
+ res = platform_get_resource_byname(pcie_dev->pdev,
+ IORESOURCE_MEM, res_info->name);
+ if (!res) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: no %s resource found.\n",
+ pcie_dev->rc_idx, res_info->name);
+ } else {
+ PCIE_DBG(pcie_dev, "start addr for %s is %pa.\n",
+ res_info->name, &res->start);
+
+ res_info->base = devm_ioremap(&pcie_dev->pdev->dev,
+ res->start, resource_size(res));
+ if (!res_info->base) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: can't remap %s.\n",
+ pcie_dev->rc_idx, res_info->name);
+ return -ENOMEM;
+ }
+
+ res_info->resource = res;
+ }
+ }
+
+ pcie_dev->parf = pcie_dev->res[MSM_PCIE_RES_PARF].base;
+ pcie_dev->phy = pcie_dev->res[MSM_PCIE_RES_PHY].base;
+ pcie_dev->elbi = pcie_dev->res[MSM_PCIE_RES_ELBI].base;
+ pcie_dev->iatu = pcie_dev->res[MSM_PCIE_RES_IATU].base;
+ pcie_dev->dm_core = pcie_dev->res[MSM_PCIE_RES_DM_CORE].base;
+ pcie_dev->conf = pcie_dev->res[MSM_PCIE_RES_CONF].base;
+ pcie_dev->bars = pcie_dev->res[MSM_PCIE_RES_BARS].base;
+ pcie_dev->tcsr = pcie_dev->res[MSM_PCIE_RES_TCSR].base;
+ pcie_dev->rumi = pcie_dev->res[MSM_PCIE_RES_RUMI].base;
+ pcie_dev->dev_mem_res = pcie_dev->res[MSM_PCIE_RES_BARS].resource;
+ pcie_dev->dev_io_res = pcie_dev->res[MSM_PCIE_RES_IO].resource;
+ pcie_dev->dev_io_res->flags = IORESOURCE_IO;
+
+ return 0;
+}
+
+static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
+ struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct resource *res;
+ struct msm_pcie_irq_info_t *irq_info;
+
+ PCIE_DBG(dev, "PCIe: RC%d: entry\n", dev->rc_idx);
+
+ dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!dev->bus_scale_table) {
+ PCIE_DBG(dev, "PCIe: RC%d: No bus scale table for %s\n",
+ dev->rc_idx, dev->pdev->name);
+ dev->bus_client = 0;
+ } else {
+ dev->bus_client =
+ msm_bus_scale_register_client(dev->bus_scale_table);
+ if (!dev->bus_client) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d:Failed to register bus client %s\n",
+ dev->rc_idx, dev->pdev->name);
+ msm_bus_cl_clear_pdata(dev->bus_scale_table);
+ return -ENODEV;
+ }
+ }
+
+ for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
+ irq_info = &dev->irq[i];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ irq_info->name);
+ if (!res) {
+ PCIE_DBG(dev, "PCIe: RC%d: can't find IRQ # for %s.\n",
+ dev->rc_idx, irq_info->name);
+ } else {
+ irq_info->num = res->start;
+ PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
+ irq_info->num);
+ }
+ }
+
+ ret = msm_pcie_get_clk(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_vreg(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_reset(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_phy(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_iommu_map(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_gpio(dev);
+ if (ret)
+ return ret;
+
+ ret = msm_pcie_get_reg(dev);
+ if (ret)
+ return ret;
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+ return 0;
+}
+
+static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
+{
+ dev->parf = NULL;
+ dev->elbi = NULL;
+ dev->iatu = NULL;
+ dev->dm_core = NULL;
+ dev->conf = NULL;
+ dev->bars = NULL;
+ dev->tcsr = NULL;
+ dev->rumi = NULL;
+ dev->dev_mem_res = NULL;
+ dev->dev_io_res = NULL;
+}
+
+static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
+{
+ int link_check_count = 0;
+ uint32_t val;
+
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE_GEN3_GEN2_CTRL, 0x1f00, 1);
+
+ msm_pcie_write_mask(dev->dm_core,
+ PCIE_GEN3_EQ_CONTROL, 0x20);
+
+ msm_pcie_write_mask(dev->dm_core +
+ PCIE_GEN3_RELATED, BIT(0), 0);
+
+ /* configure PCIe preset */
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE_GEN3_MISC_CONTROL, BIT(0), 1);
+ msm_pcie_write_reg(dev->dm_core,
+ PCIE_GEN3_SPCIE_CAP, 0x77777777);
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE_GEN3_MISC_CONTROL, BIT(0), 0);
+
+ if (msm_pcie_force_gen1 & BIT(dev->rc_idx))
+ dev->target_link_speed = GEN1_SPEED;
+
+ if (dev->target_link_speed)
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_CAP + PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCAP_SLS, dev->target_link_speed);
+
+ /* set max tlp read size */
+ msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
+ 0x7000, dev->tlp_rd_size);
+
+ /* enable link training */
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
+
+ PCIE_DBG(dev, "%s", "check if link is up\n");
+
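+	/*
+	 * Module-parameter override: BIT(rc_idx) in the low bits enables it
+	 * for this RC, and the count itself is carried in the upper bits.
+	 */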
+ if (msm_pcie_link_check_max_count & BIT(dev->rc_idx))
+ dev->link_check_max_count = msm_pcie_link_check_max_count >> 4;
+
+ /* Wait for up to 100ms for the link to come up */
+ do {
+ usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
+ val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+ PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
+ dev->rc_idx, (val >> 12) & 0x3f);
+ } while ((!(val & XMLH_LINK_UP) ||
+ !msm_pcie_confirm_linkup(dev, false, false, NULL))
+ && (link_check_count++ < dev->link_check_max_count));
+
+ if ((val & XMLH_LINK_UP) &&
+ msm_pcie_confirm_linkup(dev, false, false, NULL)) {
+		PCIE_DBG(dev, "Link is up after %d checks\n",
+ link_check_count);
+ PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
+ } else {
+ PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
+ dev->rc_idx);
+ return MSM_PCIE_ERROR;
+ }
+
+ return 0;
+}
+
+static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
+{
+ int ret = 0;
+ uint32_t val;
+ unsigned long ep_up_timeout = 0;
+
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ mutex_lock(&dev->setup_lock);
+
+ if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+ PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
+ dev->rc_idx);
+ goto out;
+ }
+
+	/* assert the PCIe reset line to keep the EP in reset */
+
+ PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
+ PERST_PROPAGATION_DELAY_US_MAX);
+
+ /* enable power */
+ ret = msm_pcie_vreg_init(dev);
+ if (ret)
+ goto out;
+
+ /* enable clocks */
+ ret = msm_pcie_clk_init(dev);
+ /* ensure that changes propagated to the hardware */
+ wmb();
+ if (ret)
+ goto clk_fail;
+
+ /* RUMI PCIe reset sequence */
+ if (dev->rumi_init)
+ dev->rumi_init(dev);
+
+ /* configure PCIe to RC mode */
+ msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);
+
+ /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
+ if (dev->l1_supported)
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+
+ /* enable PCIe clocks and resets */
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
+
+ /* change DBI base address */
+ writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
+ 0, BIT(4));
+
+ /* enable selected IRQ */
+ msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);
+
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
+ BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
+ BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
+ BIT(MSM_PCIE_INT_EVT_AER_ERR) |
+ BIT(MSM_PCIE_INT_EVT_MSI_0) |
+ BIT(MSM_PCIE_INT_EVT_MSI_1) |
+ BIT(MSM_PCIE_INT_EVT_MSI_2) |
+ BIT(MSM_PCIE_INT_EVT_MSI_3) |
+ BIT(MSM_PCIE_INT_EVT_MSI_4) |
+ BIT(MSM_PCIE_INT_EVT_MSI_5) |
+ BIT(MSM_PCIE_INT_EVT_MSI_6) |
+ BIT(MSM_PCIE_INT_EVT_MSI_7));
+
+ PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
+ dev->rc_idx,
+ readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
+
+ writel_relaxed(dev->slv_addr_space_size, dev->parf +
+ PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+
+ if (dev->use_msi) {
+ PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
+ val = dev->wr_halt_size ? dev->wr_halt_size :
+ readl_relaxed(dev->parf +
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ msm_pcie_write_reg(dev->parf,
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+ BIT(31) | val);
+
+ PCIE_DBG(dev,
+ "RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
+ dev->rc_idx,
+ readl_relaxed(dev->parf +
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
+ }
+
+ /* init PCIe PHY */
+ ret = pcie_phy_init(dev);
+ if (ret)
+ goto link_fail;
+
+ usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);
+
+ if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+ dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+	/* de-assert the PCIe reset line to bring the EP out of reset */
+
+ PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
+ dev->rc_idx);
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+ usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+
+ ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US);
+
+ ret = msm_pcie_link_train(dev);
+ if (ret)
+ goto link_fail;
+
+ dev->link_status = MSM_PCIE_LINK_ENABLED;
+ dev->power_on = true;
+ dev->suspending = false;
+ dev->link_turned_on_counter++;
+
+ if (dev->switch_latency) {
+ PCIE_DBG(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
+ if (dev->switch_latency <= SWITCH_DELAY_MAX)
+ usleep_range(dev->switch_latency * 1000,
+ dev->switch_latency * 1000);
+ else
+ msleep(dev->switch_latency);
+ }
+
+ msm_pcie_config_sid(dev);
+ msm_pcie_config_controller(dev);
+
+ /* check endpoint configuration space is accessible */
+ while (time_before(jiffies, ep_up_timeout)) {
+ if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN)
+ break;
+ usleep_range(EP_UP_TIMEOUT_US_MIN, EP_UP_TIMEOUT_US_MAX);
+ }
+
+ if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: endpoint config space is accessible\n",
+ dev->rc_idx);
+ } else {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: endpoint config space is not accessible\n",
+ dev->rc_idx);
+ dev->link_status = MSM_PCIE_LINK_DISABLED;
+ dev->power_on = false;
+ dev->link_turned_off_counter++;
+ ret = -ENODEV;
+ goto link_fail;
+ }
+
+ if (!IS_ENABLED(CONFIG_PCI_MSM_MSI))
+ msm_pcie_config_msi_controller(dev);
+
+ if (dev->enumerated)
+ msm_pcie_config_link_pm(dev, true);
+
+ goto out;
+
+link_fail:
+ if (msm_pcie_keep_resources_on & BIT(dev->rc_idx))
+ goto out;
+
+ if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+ 1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+ if (dev->phy_power_down_offset)
+ msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0);
+
+ msm_pcie_pipe_clk_deinit(dev);
+ msm_pcie_clk_deinit(dev);
+clk_fail:
+ msm_pcie_vreg_deinit(dev);
+out:
+ mutex_unlock(&dev->setup_lock);
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+ return ret;
+}
+
+static void msm_pcie_disable(struct msm_pcie_dev_t *dev)
+{
+ PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+ mutex_lock(&dev->setup_lock);
+
+ if (!dev->power_on) {
+ PCIE_DBG(dev,
+			"PCIe: the link of RC%d is already powered down.\n",
+ dev->rc_idx);
+ mutex_unlock(&dev->setup_lock);
+ return;
+ }
+
+ dev->link_status = MSM_PCIE_LINK_DISABLED;
+ dev->power_on = false;
+ dev->link_turned_off_counter++;
+
+ PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+ dev->rc_idx);
+
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+
+ if (dev->phy_power_down_offset)
+ msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0);
+
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
+ BIT(0));
+ msm_pcie_clk_deinit(dev);
+ msm_pcie_vreg_deinit(dev);
+ msm_pcie_pipe_clk_deinit(dev);
+
+ if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+ 1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+ mutex_unlock(&dev->setup_lock);
+
+ PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
+ struct msm_pcie_device_info *ep_dev_info)
+{
+ u32 val;
+ void __iomem *ep_base = ep_dev_info->conf_base;
+ u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
+ 0xff;
+
+ while (current_offset) {
+ if (msm_pcie_check_align(dev, current_offset))
+ return;
+
+ val = readl_relaxed(ep_base + current_offset);
+ if ((val & 0xff) == PCIE20_CAP_ID) {
+ ep_dev_info->dev_ctrlstts_offset =
+ current_offset + 0x8;
+ break;
+ }
+ current_offset = (val >> 8) & 0xff;
+ }
+
+ if (!ep_dev_info->dev_ctrlstts_offset) {
+ PCIE_DBG(dev,
+ "RC%d endpoint does not support PCIe cap registers\n",
+ dev->rc_idx);
+ return;
+ }
+
+ PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
+ dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
+
+ /* Enable AER on EP */
+ msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
+ BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+ PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+ readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
+}
+
+static int msm_pcie_config_device_table(struct device *dev, void *pdev)
+{
+ struct pci_dev *pcidev = to_pci_dev(dev);
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
+ struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
+ struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
+ int ret = 0;
+ u32 rc_idx = pcie_dev->rc_idx;
+ u32 i, index;
+ u32 bdf = 0;
+ u8 type;
+ u32 h_type;
+ u32 bme;
+
+ if (!pcidev) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: Did not find PCI device in list for RC%d.\n",
+ pcie_dev->rc_idx);
+ return -ENODEV;
+ }
+
+ PCIE_DBG(pcie_dev,
+ "PCI device found: vendor-id:0x%x device-id:0x%x\n",
+ pcidev->vendor, pcidev->device);
+
+ if (!pcidev->bus->number)
+ return ret;
+
+ bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
+ type = pcidev->bus->number == 1 ?
+ PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
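+	/* match this BDF to a free device-table slot and map its config space via the iATU */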
+ for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
+ if (msm_pcie_dev_tbl[i].bdf == bdf &&
+ !msm_pcie_dev_tbl[i].dev) {
+ for (index = 0; index < MAX_DEVICE_NUM; index++) {
+ if (dev_table_t[index].bdf == bdf) {
+ msm_pcie_dev_tbl[i].dev = pcidev;
+ msm_pcie_dev_tbl[i].domain = rc_idx;
+ msm_pcie_dev_tbl[i].conf_base =
+ pcie_dev->conf + index * SZ_4K;
+ msm_pcie_dev_tbl[i].phy_address =
+ axi_conf->start + index * SZ_4K;
+
+ dev_table_t[index].dev = pcidev;
+ dev_table_t[index].domain = rc_idx;
+ dev_table_t[index].conf_base =
+ pcie_dev->conf + index * SZ_4K;
+ dev_table_t[index].phy_address =
+ axi_conf->start + index * SZ_4K;
+
+ msm_pcie_iatu_config(pcie_dev, index,
+ type,
+ dev_table_t[index].phy_address,
+ dev_table_t[index].phy_address
+ + SZ_4K - 1,
+ bdf);
+
+ h_type = readl_relaxed(
+ dev_table_t[index].conf_base +
+ PCIE20_HEADER_TYPE);
+
+ bme = readl_relaxed(
+ dev_table_t[index].conf_base +
+ PCIE20_COMMAND_STATUS);
+
+ if (h_type & (1 << 16)) {
+ pci_write_config_dword(pcidev,
+ PCIE20_COMMAND_STATUS,
+ bme | 0x06);
+ } else {
+ pcie_dev->num_ep++;
+ dev_table_t[index].registered =
+ false;
+ }
+
+ if (pcie_dev->num_ep > 1)
+ pcie_dev->pending_ep_reg = true;
+
+ if (pcie_dev->aer_enable)
+ msm_pcie_config_ep_aer(pcie_dev,
+ &dev_table_t[index]);
+
+ break;
+ }
+ }
+ if (index == MAX_DEVICE_NUM) {
+ PCIE_ERR(pcie_dev,
+ "RC%d PCI device table is full.\n",
+ rc_idx);
+ ret = index;
+ } else {
+ break;
+ }
+ } else if (msm_pcie_dev_tbl[i].bdf == bdf &&
+ pcidev == msm_pcie_dev_tbl[i].dev) {
+ break;
+ }
+ }
+ if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
+ PCIE_ERR(pcie_dev,
+ "Global PCI device table is full: %d elements.\n",
+ i);
+ PCIE_ERR(pcie_dev,
+ "Bus number is 0x%x\nDevice number is 0x%x\n",
+ pcidev->bus->number, pcidev->devfn);
+ ret = i;
+ }
+ return ret;
+}
+
+static void msm_pcie_config_sid(struct msm_pcie_dev_t *dev)
+{
+ void __iomem *bdf_to_sid_base = dev->parf +
+ PCIE20_PARF_BDF_TO_SID_TABLE_N;
+ int i;
+
+ if (!dev->sid_info)
+ return;
+
+	/* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ if (dev->enumerated) {
+ for (i = 0; i < dev->sid_info_len; i++)
+ writel_relaxed(dev->sid_info[i].value,
+ bdf_to_sid_base + dev->sid_info[i].hash *
+ sizeof(u32));
+ return;
+ }
+
+ /* initial setup for boot */
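+	/* hash each BDF with CRC-8 into a table slot; collisions chain through the NEXT byte */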
+ for (i = 0; i < dev->sid_info_len; i++) {
+ struct msm_pcie_sid_info_t *sid_info = &dev->sid_info[i];
+ u32 val;
+ u8 hash;
+ u16 bdf_be = cpu_to_be16(sid_info->bdf);
+
+ hash = crc8(msm_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+ 0);
+
+ val = readl_relaxed(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* if there is a collision, look for next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* if NEXT is NULL then update current entry */
+ if (!(val & next_mask)) {
+ int j;
+
+ val |= (u32)hash;
+ writel_relaxed(val, bdf_to_sid_base +
+ current_hash * sizeof(u32));
+
+				/* find sid_info of the current hash and update it */
+ for (j = 0; j < dev->sid_info_len; j++) {
+ if (dev->sid_info[j].hash !=
+ current_hash)
+ continue;
+
+ dev->sid_info[j].next_hash = hash;
+ dev->sid_info[j].value = val;
+ break;
+ }
+ }
+
+ val = readl_relaxed(bdf_to_sid_base +
+ hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = sid_info->bdf << 16 | sid_info->pcie_sid << 8 | 0;
+ writel_relaxed(val, bdf_to_sid_base + hash * sizeof(u32));
+
+ sid_info->hash = hash;
+ sid_info->value = val;
+ }
+}
+
+int msm_pcie_enumerate(u32 rc_idx)
+{
+ int ret = 0, bus_ret = 0;
+ struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
+ struct pci_dev *pcidev = NULL;
+ struct pci_host_bridge *bridge;
+ bool found = false;
+ struct pci_bus *bus;
+ resource_size_t iobase = 0;
+ u32 ids, vendor_id, device_id;
+ LIST_HEAD(res);
+
+ mutex_lock(&dev->enumerate_lock);
+
+ PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
+
+ if (!dev->drv_ready) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: has not been successfully probed yet\n",
+ rc_idx);
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+
+ if (dev->enumerated) {
+ PCIE_ERR(dev, "PCIe: RC%d: has already been enumerated.\n",
+ dev->rc_idx);
+ goto out;
+ }
+
+ ret = msm_pcie_enable(dev);
+ if (ret) {
+ PCIE_ERR(dev, "PCIe: RC%d: failed to enable\n", dev->rc_idx);
+ goto out;
+ }
+
+ /* kick start ARM PCI configuration framework */
+ ids = readl_relaxed(dev->dm_core);
+ vendor_id = ids & 0xffff;
+ device_id = (ids & 0xffff0000) >> 16;
+
+ PCIE_DBG(dev, "PCIe: RC%d: vendor-id:0x%x device_id:0x%x\n",
+ dev->rc_idx, vendor_id, device_id);
+
+ bridge = devm_pci_alloc_host_bridge(&dev->pdev->dev, sizeof(*dev));
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = devm_of_pci_get_host_bridge_resources(&dev->pdev->dev, 0, 0xff,
+ &res, &iobase);
+ if (ret) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: failed to get host bridge resources. ret: %d\n",
+ dev->rc_idx, ret);
+ goto out;
+ }
+
+ ret = devm_request_pci_bus_resources(&dev->pdev->dev, &res);
+ if (ret) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: failed to request pci bus resources %d\n",
+ dev->rc_idx, ret);
+ goto out;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSM_MSI)) {
+ ret = msm_msi_init(&dev->pdev->dev);
+ if (ret)
+ goto out;
+ }
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = &dev->pdev->dev;
+ bridge->sysdata = dev;
+ bridge->busnr = 0;
+ bridge->ops = &msm_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret) {
+ PCIE_ERR(dev, "PCIe: RC%d: failed to scan root bus %d\n",
+ dev->rc_idx, ret);
+ goto out;
+ }
+
+ bus = bridge->bus;
+
+ msm_pcie_fixup_irqs(dev);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_bus_add_devices(bus);
+
+ dev->enumerated = true;
+
+ msm_pcie_write_mask(dev->dm_core +
+ PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
+
+ if (dev->cpl_timeout && dev->bridge_found)
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_DEVICE_CONTROL2_STATUS2, 0xf, dev->cpl_timeout);
+
+ if (dev->shadow_en) {
+ u32 val = readl_relaxed(dev->dm_core + PCIE20_COMMAND_STATUS);
+
+ PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n", val);
+ dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
+ }
+
+ do {
+ pcidev = pci_get_device(vendor_id, device_id, pcidev);
+ if (pcidev && (dev == (struct msm_pcie_dev_t *)
+ PCIE_BUS_PRIV_DATA(pcidev->bus))) {
+ dev->dev = pcidev;
+ found = true;
+ }
+ } while (!found && pcidev);
+
+ if (!pcidev) {
+ PCIE_ERR(dev, "PCIe: RC%d: Did not find PCI device.\n",
+ dev->rc_idx);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
+ &msm_pcie_config_device_table);
+ if (bus_ret) {
+ PCIE_ERR(dev, "PCIe: RC%d: Failed to set up device table\n",
+ dev->rc_idx);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ msm_pcie_check_l1ss_support_all(dev);
+ msm_pcie_config_link_pm(dev, true);
+out:
+ mutex_unlock(&dev->enumerate_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_enumerate);
+
+static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
+ enum msm_pcie_event event)
+{
+ if (dev->event_reg && dev->event_reg->callback &&
+ (dev->event_reg->events & event)) {
+ struct msm_pcie_notify *notify = &dev->event_reg->notify;
+
+ notify->event = event;
+ notify->user = dev->event_reg->user;
+ PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
+ dev->rc_idx, event);
+ dev->event_reg->callback(notify);
+
+ if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
+ (event == MSM_PCIE_EVENT_LINKDOWN)) {
+ dev->user_suspend = true;
+ PCIE_DBG(dev,
+ "PCIe: Client of RC%d will recover the link later.\n",
+ dev->rc_idx);
+ return;
+ }
+ } else {
+ PCIE_DBG2(dev,
+ "PCIe: Client of RC%d does not have registration for event %d\n",
+ dev->rc_idx, event);
+ }
+}
+
+static void handle_wake_func(struct work_struct *work)
+{
+ int i, ret;
+ struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
+ handle_wake_work);
+
+ PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);
+
+ mutex_lock(&dev->recovery_lock);
+
+ if (!dev->enumerated) {
+ PCIE_DBG(dev,
+ "PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
+ dev->rc_idx);
+
+ ret = msm_pcie_enumerate(dev->rc_idx);
+ if (ret) {
+ PCIE_ERR(dev,
+ "PCIe: failed to enable RC%d upon wake request from the device.\n",
+ dev->rc_idx);
+ goto out;
+ }
+
+ if (dev->num_ep > 1) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ dev->event_reg = dev->pcidev_table[i].event_reg;
+
+ if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
+ && dev->event_reg &&
+ dev->event_reg->callback &&
+ (dev->event_reg->events &
+ MSM_PCIE_EVENT_LINKUP)) {
+ struct msm_pcie_notify *notify =
+ &dev->event_reg->notify;
+ notify->event = MSM_PCIE_EVENT_LINKUP;
+ notify->user = dev->event_reg->user;
+ PCIE_DBG(dev,
+ "PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+ dev->rc_idx);
+ dev->event_reg->callback(notify);
+ }
+ }
+ } else {
+ if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
+ dev->event_reg && dev->event_reg->callback &&
+ (dev->event_reg->events &
+ MSM_PCIE_EVENT_LINKUP)) {
+ struct msm_pcie_notify *notify =
+ &dev->event_reg->notify;
+ notify->event = MSM_PCIE_EVENT_LINKUP;
+ notify->user = dev->event_reg->user;
+ PCIE_DBG(dev,
+ "PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+ dev->rc_idx);
+ dev->event_reg->callback(notify);
+ } else {
+ PCIE_DBG(dev,
+ "PCIe: Client of RC%d does not have registration for linkup event.\n",
+ dev->rc_idx);
+ }
+ }
+ goto out;
+ } else {
+ PCIE_ERR(dev,
+ "PCIe: The enumeration for RC%d has already been done.\n",
+ dev->rc_idx);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&dev->recovery_lock);
+}
+
+static irqreturn_t handle_aer_irq(int irq, void *data)
+{
+ struct msm_pcie_dev_t *dev = data;
+
+ int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
+ int ep_corr_val = 0, ep_uncorr_val = 0;
+ int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
+ u32 ep_dev_ctrlstts_offset = 0;
+ int i, j, ep_src_bdf = 0;
+ void __iomem *ep_base = NULL;
+
+ PCIE_DBG2(dev,
+ "AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
+ dev->rc_idx, irq, dev->rc_corr_counter,
+ dev->rc_non_fatal_counter, dev->rc_fatal_counter,
+ dev->ep_corr_counter, dev->ep_non_fatal_counter,
+ dev->ep_fatal_counter);
+
+ uncorr_val = readl_relaxed(dev->dm_core +
+ PCIE20_AER_UNCORR_ERR_STATUS_REG);
+ corr_val = readl_relaxed(dev->dm_core +
+ PCIE20_AER_CORR_ERR_STATUS_REG);
+ rc_err_status = readl_relaxed(dev->dm_core +
+ PCIE20_AER_ROOT_ERR_STATUS_REG);
+ rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
+ PCIE20_CAP_DEVCTRLSTATUS);
+
+ if (uncorr_val)
+ PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+ uncorr_val);
+ if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
+ PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+ corr_val);
+
+ if ((rc_dev_ctrlstts >> 18) & 0x1)
+ dev->rc_fatal_counter++;
+ if ((rc_dev_ctrlstts >> 17) & 0x1)
+ dev->rc_non_fatal_counter++;
+ if ((rc_dev_ctrlstts >> 16) & 0x1)
+ dev->rc_corr_counter++;
+
+ msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
+ BIT(18)|BIT(17)|BIT(16));
+
+ if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
+ PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
+ goto out;
+ }
+
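+	/*
+	 * The AER source ID register reports the correctable source in its
+	 * lower 16 bits and the fatal/non-fatal source in its upper 16 bits.
+	 */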
+ for (i = 0; i < 2; i++) {
+ if (i)
+ ep_src_bdf = readl_relaxed(dev->dm_core +
+ PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
+ else
+ ep_src_bdf = (readl_relaxed(dev->dm_core +
+ PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;
+
+ if (!ep_src_bdf)
+ continue;
+
+ for (j = 0; j < MAX_DEVICE_NUM; j++) {
+ if (ep_src_bdf == dev->pcidev_table[j].bdf) {
+ PCIE_DBG2(dev,
+ "PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
+ i ? "Uncorrectable" : "Correctable",
+ dev->pcidev_table[j].bdf >> 24,
+ dev->pcidev_table[j].bdf >> 19 & 0x1f,
+ dev->pcidev_table[j].bdf >> 16 & 0x07);
+ ep_base = dev->pcidev_table[j].conf_base;
+ ep_dev_ctrlstts_offset =
+ dev->pcidev_table[j].dev_ctrlstts_offset;
+ break;
+ }
+ }
+
+ if (!ep_base) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d no endpoint found for reported error\n",
+ dev->rc_idx);
+ goto out;
+ }
+
+ ep_uncorr_val = readl_relaxed(ep_base +
+ PCIE20_AER_UNCORR_ERR_STATUS_REG);
+ ep_corr_val = readl_relaxed(ep_base +
+ PCIE20_AER_CORR_ERR_STATUS_REG);
+ ep_dev_ctrlstts = readl_relaxed(ep_base +
+ ep_dev_ctrlstts_offset);
+
+ if (ep_uncorr_val)
+ PCIE_DBG(dev,
+ "EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+ ep_uncorr_val);
+ if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
+ PCIE_DBG(dev,
+ "EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+ ep_corr_val);
+
+ if ((ep_dev_ctrlstts >> 18) & 0x1)
+ dev->ep_fatal_counter++;
+ if ((ep_dev_ctrlstts >> 17) & 0x1)
+ dev->ep_non_fatal_counter++;
+ if ((ep_dev_ctrlstts >> 16) & 0x1)
+ dev->ep_corr_counter++;
+
+ msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
+ BIT(18)|BIT(17)|BIT(16));
+
+ msm_pcie_write_reg_field(ep_base,
+ PCIE20_AER_UNCORR_ERR_STATUS_REG,
+ 0x3fff031, 0x3fff031);
+ msm_pcie_write_reg_field(ep_base,
+ PCIE20_AER_CORR_ERR_STATUS_REG,
+ 0xf1c1, 0xf1c1);
+ }
+out:
+ if (((dev->rc_corr_counter < corr_counter_limit) &&
+ (dev->ep_corr_counter < corr_counter_limit)) ||
+ uncorr_val || ep_uncorr_val)
+ PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
+ rc_err_status);
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_AER_UNCORR_ERR_STATUS_REG,
+ 0x3fff031, 0x3fff031);
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_AER_CORR_ERR_STATUS_REG,
+ 0xf1c1, 0xf1c1);
+ msm_pcie_write_reg_field(dev->dm_core,
+ PCIE20_AER_ROOT_ERR_STATUS_REG,
+ 0x7f, 0x7f);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_wake_irq(int irq, void *data)
+{
+ struct msm_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+ int i;
+
+ spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
+
+ dev->wake_counter++;
+ PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
+ dev->wake_counter, dev->rc_idx);
+
+ PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
+ dev->rc_idx);
+
+ if (!dev->enumerated && !(dev->boot_option &
+ MSM_PCIE_NO_WAKE_ENUMERATION)) {
+ PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
+ schedule_work(&dev->handle_wake_work);
+ } else {
+ PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
+ __pm_stay_awake(&dev->ws);
+ __pm_relax(&dev->ws);
+
+ if (dev->num_ep > 1) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ dev->event_reg =
+ dev->pcidev_table[i].event_reg;
+ msm_pcie_notify_client(dev,
+ MSM_PCIE_EVENT_WAKEUP);
+ }
+ } else {
+ msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_linkdown_irq(int irq, void *data)
+{
+ struct msm_pcie_dev_t *dev = data;
+ int i;
+
+ dev->linkdown_counter++;
+
+ PCIE_DBG(dev,
+ "PCIe: No. %ld linkdown IRQ for RC%d.\n",
+ dev->linkdown_counter, dev->rc_idx);
+
+ if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
+ PCIE_DBG(dev,
+			"PCIe: Linkdown IRQ for RC%d when the link is not enabled\n",
+ dev->rc_idx);
+ } else if (dev->suspending) {
+ PCIE_DBG(dev,
+			"PCIe: the link of RC%d is suspending.\n",
+ dev->rc_idx);
+ } else {
+ dev->link_status = MSM_PCIE_LINK_DISABLED;
+ dev->shadow_en = false;
+
+ if (dev->linkdown_panic)
+ panic("User has chosen to panic on linkdown\n");
+
+ /* assert PERST */
+ if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx)))
+ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+ dev->gpio[MSM_PCIE_GPIO_PERST].on);
+
+ PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);
+
+ if (dev->num_ep > 1) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ dev->event_reg =
+ dev->pcidev_table[i].event_reg;
+ msm_pcie_notify_client(dev,
+ MSM_PCIE_EVENT_LINKDOWN);
+ }
+ } else {
+ msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
+{
+ int i, j;
+ unsigned long val;
+ struct msm_pcie_dev_t *dev = data;
+ void __iomem *ctrl_status;
+
+ PCIE_DUMP(dev, "irq: %d\n", irq);
+
+	/*
+	 * Check for set bits, clear each one by writing it back,
+	 * and trigger the corresponding IRQ.
+	 */
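+	/* each MSI controller group's registers are spaced 12 bytes apart */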
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
+ ctrl_status = dev->dm_core +
+ PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
+
+ val = readl_relaxed(ctrl_status);
+ while (val) {
+ j = find_first_bit(&val, 32);
+ writel_relaxed(BIT(j), ctrl_status);
+ /* ensure that interrupt is cleared (acked) */
+ wmb();
+ generic_handle_irq(
+ irq_find_mapping(dev->irq_domain, (j + (32*i)))
+ );
+ val = readl_relaxed(ctrl_status);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_global_irq(int irq, void *data)
+{
+ int i;
+ struct msm_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+ u32 status = 0;
+
+ spin_lock_irqsave(&dev->irq_lock, irqsave_flags);
+
+ if (dev->suspending) {
+ PCIE_DBG2(dev,
+ "PCIe: RC%d is currently suspending.\n",
+ dev->rc_idx);
+ spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
+ return IRQ_HANDLED;
+ }
+
+ status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
+ readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
+
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
+
+ PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
+ dev->rc_idx, irq, status);
+
+ for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
+ if (status & BIT(i)) {
+ switch (i) {
+ case MSM_PCIE_INT_EVT_LINK_DOWN:
+ PCIE_DBG(dev,
+ "PCIe: RC%d: handle linkdown event.\n",
+ dev->rc_idx);
+ handle_linkdown_irq(irq, data);
+ break;
+ case MSM_PCIE_INT_EVT_AER_LEGACY:
+ PCIE_DBG(dev,
+ "PCIe: RC%d: AER legacy event.\n",
+ dev->rc_idx);
+ handle_aer_irq(irq, data);
+ break;
+ case MSM_PCIE_INT_EVT_AER_ERR:
+ PCIE_DBG(dev,
+ "PCIe: RC%d: AER event.\n",
+ dev->rc_idx);
+ handle_aer_irq(irq, data);
+ break;
+ default:
+ PCIE_DUMP(dev,
+					"PCIe: RC%d: Unexpected event %d caught!\n",
+ dev->rc_idx, i);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+static void msm_pcie_destroy_irq(struct msi_desc *entry, unsigned int irq)
+{
+ int pos;
+ struct msm_pcie_dev_t *dev;
+ struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
+
+ if (!pdev) {
+ pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
+ return;
+ }
+
+ dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+ if (!dev) {
+ pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
+ return;
+ }
+
+ PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
+ pos = irq - irq_find_mapping(dev->irq_domain, 0);
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
+ pos, *dev->msi_irq_in_use);
+ clear_bit(pos, dev->msi_irq_in_use);
+ PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
+ pos, *dev->msi_irq_in_use);
+}
+
+/* hookup to linux pci msi framework */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+
+ PCIE_GEN_DBG("irq %d deallocated\n", irq);
+
+ if (entry)
+ msm_pcie_destroy_irq(entry, irq);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
+ pcie_dev->rc_idx, dev->vendor, dev->device);
+
+ pcie_dev->use_msi = false;
+
+ list_for_each_entry(entry, &dev->dev.msi_list, list) {
+ int i, nvec;
+
+ if (entry->irq == 0)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ msm_pcie_destroy_irq(entry, entry->irq + i);
+ }
+}
+
+static void msm_pcie_msi_nop(struct irq_data *d)
+{
+}
+
+static struct irq_chip pcie_msi_chip = {
+ .name = "msm-pcie-msi",
+ .irq_ack = msm_pcie_msi_nop,
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
+{
+ int irq, pos;
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
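+	/* retry if another caller claims the bit between the find and the set */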
+again:
+ pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
+
+ if (pos >= PCIE_MSI_NR_IRQS)
+ return -ENOSPC;
+
+ PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
+
+ if (test_and_set_bit(pos, dev->msi_irq_in_use))
+ goto again;
+ else
+ PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
+
+ irq = irq_create_mapping(dev->irq_domain, pos);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
+}
+
+static int arch_setup_msi_irq_default(struct pci_dev *pdev,
+ struct msi_desc *desc, int nvec)
+{
+ int irq;
+ struct msi_msg msg;
+ struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ irq = msm_pcie_create_irq(dev);
+
+ PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
+
+ if (irq < 0)
+ return irq;
+
+ PCIE_DBG(dev, "irq %d allocated\n", irq);
+
+ irq_set_chip_data(irq, pdev);
+ irq_set_msi_desc(irq, desc);
+
+ /* write msi vector and data */
+ msg.address_hi = 0;
+ msg.address_lo = MSM_PCIE_MSI_PHY;
+ msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+ struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ return arch_setup_msi_irq_default(pdev, desc, 1);
+}
+
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *entry;
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+ if (type != PCI_CAP_ID_MSI || nvec > 32)
+ return -ENOSPC;
+
+ PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
+
+ list_for_each_entry(entry, &dev->dev.msi_list, list) {
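+		/* record the vector count as log2; multi-MSI grants are powers of two */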
+ entry->msi_attrib.multiple =
+ __ilog2_u32(__roundup_pow_of_two(nvec));
+
+ ret = arch_setup_msi_irq_default(dev, entry, nvec);
+
+ PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
+
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -ENOSPC;
+ }
+
+ pcie_dev->use_msi = true;
+
+ return 0;
+}
+
+static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &pcie_msi_chip, handle_simple_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops msm_pcie_msi_ops = {
+ .map = msm_pcie_msi_map,
+};
+
+static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+{
+ int rc;
+ int msi_start = 0;
+ struct device *pdev = &dev->pdev->dev;
+
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ if (dev->rc_idx)
+ wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
+ else
+ wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
+
+ /* register handler for physical MSI interrupt line */
+ if (dev->irq[MSM_PCIE_INT_MSI].num) {
+ rc = devm_request_irq(pdev,
+ dev->irq[MSM_PCIE_INT_MSI].num,
+ handle_msi_irq,
+ IRQF_TRIGGER_RISING,
+ dev->irq[MSM_PCIE_INT_MSI].name,
+ dev);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: Unable to request MSI interrupt\n",
+ dev->rc_idx);
+ return rc;
+ }
+ }
+
+ if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
+ rc = devm_request_irq(pdev,
+ dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
+ handle_global_irq,
+ IRQF_TRIGGER_RISING,
+ dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
+ dev);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
+ dev->rc_idx,
+ dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
+ return rc;
+ }
+ }
+
+ /* register handler for PCIE_WAKE_N interrupt line */
+ if (dev->wake_n) {
+ rc = devm_request_irq(pdev,
+ dev->wake_n, handle_wake_irq,
+ IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: Unable to request wake interrupt\n",
+ dev->rc_idx);
+ return rc;
+ }
+
+ INIT_WORK(&dev->handle_wake_work, handle_wake_func);
+
+ rc = enable_irq_wake(dev->wake_n);
+ if (rc) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: Unable to enable wake interrupt\n",
+ dev->rc_idx);
+ return rc;
+ }
+ }
+
+ /* Create a virtual domain of interrupts */
+ if (!IS_ENABLED(CONFIG_PCI_MSM_MSI)) {
+ dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
+ PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
+
+ if (!dev->irq_domain) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: Unable to initialize irq domain\n",
+ dev->rc_idx);
+
+ if (dev->wake_n)
+ disable_irq(dev->wake_n);
+
+			return -ENOMEM;
+ }
+
+ msi_start = irq_create_mapping(dev->irq_domain, 0);
+ }
+
+ return 0;
+}
+
+static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+{
+ PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+ wakeup_source_trash(&dev->ws);
+
+ if (dev->wake_n)
+ disable_irq(dev->wake_n);
+}
+
+static bool msm_pcie_check_l0s_support(struct pci_dev *pdev,
+ struct msm_pcie_dev_t *pcie_dev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u32 val;
+
+ /* check parent supports L0s */
+ if (parent) {
+ u32 val2;
+
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
+ &val);
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCTL,
+ &val2);
+ val = (val & BIT(10)) && (val2 & PCI_EXP_LNKCTL_ASPM_L0S);
+ if (!val) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: Parent PCI device %02x:%02x.%01x does not support L0s\n",
+ pcie_dev->rc_idx, parent->bus->number,
+ PCI_SLOT(parent->devfn),
+ PCI_FUNC(parent->devfn));
+ return false;
+ }
+ }
+
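+	/* LNKCAP bit 10 advertises ASPM L0s support for this device */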
+ pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
+ if (!(val & BIT(10))) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x does not support L0s\n",
+ pcie_dev->rc_idx, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ return false;
+ }
+
+ return true;
+}
+
+static bool msm_pcie_check_l1_support(struct pci_dev *pdev,
+ struct msm_pcie_dev_t *pcie_dev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u32 val;
+
+ /* check parent supports L1 */
+ if (parent) {
+ u32 val2;
+
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
+ &val);
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCTL,
+ &val2);
+ val = (val & BIT(11)) && (val2 & PCI_EXP_LNKCTL_ASPM_L1);
+ if (!val) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: Parent PCI device %02x:%02x.%01x does not support L1\n",
+ pcie_dev->rc_idx, parent->bus->number,
+ PCI_SLOT(parent->devfn),
+ PCI_FUNC(parent->devfn));
+ return false;
+ }
+ }
+
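+	/* LNKCAP bit 11 advertises ASPM L1 support for this device */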
+ pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
+ if (!(val & BIT(11))) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x does not support L1\n",
+ pcie_dev->rc_idx, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ return false;
+ }
+
+ return true;
+}
+
+static int msm_pcie_check_l1ss_support(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+ u32 val;
+ u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
+
+ if (!pcie_dev->l1ss_supported)
+ return -ENXIO;
+
+ l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss_cap_id_offset) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x could not find L1ss capability register\n",
+ pcie_dev->rc_idx, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pcie_dev->l1ss_supported = false;
+ return -ENXIO;
+ }
+
+ l1ss_cap_offset = l1ss_cap_id_offset + PCI_L1SS_CAP;
+ l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
+
+ pci_read_config_dword(pdev, l1ss_cap_offset, &val);
+ pcie_dev->l1_1_pcipm_supported &= !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
+ pcie_dev->l1_2_pcipm_supported &= !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
+ pcie_dev->l1_1_aspm_supported &= !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
+ pcie_dev->l1_2_aspm_supported &= !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
+ if (!pcie_dev->l1_1_pcipm_supported &&
+ !pcie_dev->l1_2_pcipm_supported &&
+ !pcie_dev->l1_1_aspm_supported &&
+ !pcie_dev->l1_2_aspm_supported) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x does not support any L1ss\n",
+ pcie_dev->rc_idx, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pcie_dev->l1ss_supported = false;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int msm_pcie_config_common_clock_enable(struct pci_dev *pdev,
+ void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device %02x:%02x.%01x\n",
+ pcie_dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ msm_pcie_config_clear_set_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCTL,
+ 0, PCI_EXP_LNKCTL_CCC);
+
+ return 0;
+}
+
+static void msm_pcie_config_common_clock_enable_all(struct msm_pcie_dev_t *dev)
+{
+ if (dev->common_clk_en)
+ pci_walk_bus(dev->dev->bus,
+ msm_pcie_config_common_clock_enable, dev);
+}
+
+static int msm_pcie_config_clock_power_management_enable(struct pci_dev *pdev,
+ void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+ u32 val;
+
+ /* enable only for upstream ports */
+ if (pci_is_root_bus(pdev->bus))
+ return 0;
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device %02x:%02x.%01x\n",
+ pcie_dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
+ if (val & PCI_EXP_LNKCAP_CLKPM)
+ msm_pcie_config_clear_set_dword(pdev,
+ pdev->pcie_cap + PCI_EXP_LNKCTL, 0,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
+ else
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x does not support clock power management\n",
+ pcie_dev->rc_idx, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ return 0;
+}
+
+static void msm_pcie_config_clock_power_management_enable_all(
+ struct msm_pcie_dev_t *dev)
+{
+ if (dev->clk_power_manage_en)
+ pci_walk_bus(dev->dev->bus,
+ msm_pcie_config_clock_power_management_enable, dev);
+}
+
+static void msm_pcie_config_l0s(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev, bool enable)
+{
+ u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
+ int ret;
+
+ PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
+ dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");
+
+ if (enable) {
+ ret = msm_pcie_check_l0s_support(pdev, dev);
+ if (!ret)
+ return;
+
+ msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
+ PCI_EXP_LNKCTL_ASPM_L0S);
+ } else {
+ msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
+ PCI_EXP_LNKCTL_ASPM_L0S, 0);
+ }
+}
+
+static void msm_pcie_config_l0s_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+
+ if (!dev->l0s_supported)
+ return;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ struct pci_bus *child;
+
+ child = pdev->subordinate;
+ if (child)
+ msm_pcie_config_l0s_disable_all(dev, child);
+ msm_pcie_config_l0s(dev, pdev, false);
+ }
+}
+
+static int msm_pcie_config_l0s_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l0s(pcie_dev, pdev, true);
+ return 0;
+}
+
+static void msm_pcie_config_l0s_enable_all(struct msm_pcie_dev_t *dev)
+{
+ if (dev->l0s_supported)
+ pci_walk_bus(dev->dev->bus, msm_pcie_config_l0s_enable, dev);
+}
+
+static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev, bool enable)
+{
+ u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
+ int ret;
+
+ PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
+ dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");
+
+ if (enable) {
+ ret = msm_pcie_check_l1_support(pdev, dev);
+ if (!ret)
+ return;
+
+ msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
+ PCI_EXP_LNKCTL_ASPM_L1);
+ } else {
+ msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
+ PCI_EXP_LNKCTL_ASPM_L1, 0);
+ }
+}
+
+static void msm_pcie_config_l1_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+
+ if (!dev->l1_supported)
+ return;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ struct pci_bus *child;
+
+ child = pdev->subordinate;
+ if (child)
+ msm_pcie_config_l1_disable_all(dev, child);
+ msm_pcie_config_l1(dev, pdev, false);
+ }
+}
+
+static int msm_pcie_config_l1_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1(pcie_dev, pdev, true);
+ return 0;
+}
+
+static void msm_pcie_config_l1_enable_all(struct msm_pcie_dev_t *dev)
+{
+ if (dev->l1_supported)
+ pci_walk_bus(dev->dev->bus, msm_pcie_config_l1_enable, dev);
+}
+
+static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev, bool enable)
+{
+ u32 val, val2;
+ u32 l1ss_cap_id_offset, l1ss_ctl1_offset;
+ u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;
+
+ PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
+ dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");
+
+ l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss_cap_id_offset) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: PCI device %02x:%02x.%01x could not find L1ss capability register\n",
+ dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ return;
+ }
+
+ l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
+
+ /* Enable the AUX Clock and the Core Clk to be synchronous for L1ss */
+ if (pci_is_root_bus(pdev->bus) && !dev->aux_clk_sync) {
+ if (enable)
+ msm_pcie_write_mask(dev->parf +
+ PCIE20_PARF_SYS_CTRL, BIT(3), 0);
+ else
+ msm_pcie_write_mask(dev->parf +
+ PCIE20_PARF_SYS_CTRL, 0, BIT(3));
+ }
+
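+	/* enable LTR and only the supported L1 substates; disable clears them all */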
+ if (enable) {
+ msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
+ PCI_EXP_DEVCTL2_LTR_EN);
+
+ msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
+ (dev->l1_1_pcipm_supported ?
+ PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
+ (dev->l1_2_pcipm_supported ?
+ PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
+ (dev->l1_1_aspm_supported ?
+ PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
+ (dev->l1_2_aspm_supported ?
+ PCI_L1SS_CTL1_ASPM_L1_2 : 0));
+ } else {
+ msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
+ PCI_EXP_DEVCTL2_LTR_EN, 0);
+
+ msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
+ PCI_L1SS_CTL1_PCIPM_L1_1 | PCI_L1SS_CTL1_PCIPM_L1_2 |
+ PCI_L1SS_CTL1_ASPM_L1_1 | PCI_L1SS_CTL1_ASPM_L1_2, 0);
+ }
+
+ pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
+ PCIE_DBG2(dev, "PCIe: RC%d: L1SUB_CONTROL1:0x%x\n", dev->rc_idx, val);
+
+ pci_read_config_dword(pdev, devctl2_offset, &val2);
+	PCIE_DBG2(dev, "PCIe: RC%d: DEVICE_CONTROL2_STATUS2: 0x%x\n",
+ dev->rc_idx, val2);
+}
+
+static int msm_pcie_config_l1ss_disable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1ss(pcie_dev, pdev, false);
+ return 0;
+}
+
+static void msm_pcie_config_l1ss_disable_all(struct msm_pcie_dev_t *dev,
+ struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+
+ if (!dev->l1ss_supported)
+ return;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ struct pci_bus *child;
+
+ child = pdev->subordinate;
+ if (child)
+ msm_pcie_config_l1ss_disable_all(dev, child);
+ msm_pcie_config_l1ss_disable(pdev, dev);
+ }
+}
+
+static int msm_pcie_config_l1ss_enable(struct pci_dev *pdev, void *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+ msm_pcie_config_l1ss(pcie_dev, pdev, true);
+ return 0;
+}
+
+static void msm_pcie_config_l1ss_enable_all(struct msm_pcie_dev_t *dev)
+{
+ if (dev->l1ss_supported)
+ pci_walk_bus(dev->dev->bus, msm_pcie_config_l1ss_enable, dev);
+}
+
+static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev, bool enable)
+{
+ struct pci_bus *bus = dev->dev->bus;
+
+ if (enable) {
+ msm_pcie_config_common_clock_enable_all(dev);
+ msm_pcie_config_clock_power_management_enable_all(dev);
+ msm_pcie_config_l1ss_enable_all(dev);
+ msm_pcie_config_l1_enable_all(dev);
+ msm_pcie_config_l0s_enable_all(dev);
+ } else {
+ msm_pcie_config_l0s_disable_all(dev, bus);
+ msm_pcie_config_l1_disable_all(dev, bus);
+ msm_pcie_config_l1ss_disable_all(dev, bus);
+ }
+}
+
+static void msm_pcie_check_l1ss_support_all(struct msm_pcie_dev_t *dev)
+{
+ pci_walk_bus(dev->dev->bus, msm_pcie_check_l1ss_support, dev);
+}
+
+static int msm_pcie_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int rc_idx = -1;
+ int i, j;
+ struct msm_pcie_dev_t *pcie_dev;
+ struct device_node *of_node;
+
+ PCIE_GEN_DBG("%s\n", __func__);
+
+ mutex_lock(&pcie_drv.drv_lock);
+
+ of_node = pdev->dev.of_node;
+
+ ret = of_property_read_u32(of_node, "cell-index", &rc_idx);
+ if (ret) {
+ PCIE_GEN_DBG("PCIe: Did not find RC index.\n");
+ goto out;
+ }
+
+	if (rc_idx >= MAX_RC_NUM) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+ pcie_drv.rc_num++;
+ pcie_dev = &msm_pcie_dev[rc_idx];
+ pcie_dev->rc_idx = rc_idx;
+ pcie_dev->pdev = pdev;
+ pcie_dev->link_status = MSM_PCIE_LINK_DEINIT;
+
+ PCIE_DBG(pcie_dev, "PCIe: RC index is %d.\n", pcie_dev->rc_idx);
+
+ pcie_dev->l0s_supported = !of_property_read_bool(of_node,
+ "qcom,no-l0s-supported");
+ if (msm_pcie_invert_l0s_support & BIT(pcie_dev->rc_idx))
+ pcie_dev->l0s_supported = !pcie_dev->l0s_supported;
+ PCIE_DBG(pcie_dev, "L0s is %s supported.\n", pcie_dev->l0s_supported ?
+ "" : "not");
+
+ pcie_dev->l1_supported = !of_property_read_bool(of_node,
+ "qcom,no-l1-supported");
+ if (msm_pcie_invert_l1_support & BIT(pcie_dev->rc_idx))
+ pcie_dev->l1_supported = !pcie_dev->l1_supported;
+ PCIE_DBG(pcie_dev, "L1 is %s supported.\n", pcie_dev->l1_supported ?
+ "" : "not");
+
+ pcie_dev->l1ss_supported = !of_property_read_bool(of_node,
+ "qcom,no-l1ss-supported");
+ if (msm_pcie_invert_l1ss_support & BIT(pcie_dev->rc_idx))
+ pcie_dev->l1ss_supported = !pcie_dev->l1ss_supported;
+ PCIE_DBG(pcie_dev, "L1ss is %s supported.\n", pcie_dev->l1ss_supported ?
+ "" : "not");
+
+ pcie_dev->l1_1_aspm_supported = pcie_dev->l1ss_supported;
+ pcie_dev->l1_2_aspm_supported = pcie_dev->l1ss_supported;
+ pcie_dev->l1_1_pcipm_supported = pcie_dev->l1ss_supported;
+ pcie_dev->l1_2_pcipm_supported = pcie_dev->l1ss_supported;
+
+ pcie_dev->common_clk_en = of_property_read_bool(of_node,
+ "qcom,common-clk-en");
+ PCIE_DBG(pcie_dev, "Common clock is %s enabled.\n",
+ pcie_dev->common_clk_en ? "" : "not");
+
+ pcie_dev->clk_power_manage_en = of_property_read_bool(of_node,
+ "qcom,clk-power-manage-en");
+ PCIE_DBG(pcie_dev, "Clock power management is %s enabled.\n",
+ pcie_dev->clk_power_manage_en ? "" : "not");
+
+ pcie_dev->aux_clk_sync = !of_property_read_bool(of_node,
+ "qcom,no-aux-clk-sync");
+ PCIE_DBG(pcie_dev, "AUX clock is %s synchronous to Core clock.\n",
+ pcie_dev->aux_clk_sync ? "" : "not");
+
+ pcie_dev->use_19p2mhz_aux_clk = of_property_read_bool(of_node,
+ "qcom,use-19p2mhz-aux-clk");
+ PCIE_DBG(pcie_dev, "AUX clock frequency is %s 19.2MHz.\n",
+ pcie_dev->use_19p2mhz_aux_clk ? "" : "not");
+
+ of_property_read_u32(of_node, "qcom,smmu-sid-base",
+ &pcie_dev->smmu_sid_base);
+ PCIE_DBG(pcie_dev, "RC%d: qcom,smmu-sid-base: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->smmu_sid_base);
+
+ of_property_read_u32(of_node, "qcom,boot-option",
+ &pcie_dev->boot_option);
+ PCIE_DBG(pcie_dev, "PCIe: RC%d boot option is 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->boot_option);
+
+ of_property_read_u32(of_node, "qcom,pcie-phy-ver",
+ &pcie_dev->phy_ver);
+ PCIE_DBG(pcie_dev, "RC%d: pcie-phy-ver: %d.\n", pcie_dev->rc_idx,
+ pcie_dev->phy_ver);
+
+ pcie_dev->link_check_max_count = LINK_UP_CHECK_MAX_COUNT;
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,link-check-max-count",
+ &pcie_dev->link_check_max_count);
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: link-check-max-count: %u.\n",
+ pcie_dev->rc_idx, pcie_dev->link_check_max_count);
+
+ of_property_read_u32(of_node, "qcom,target-link-speed",
+ &pcie_dev->target_link_speed);
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: target-link-speed: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->target_link_speed);
+
+ of_property_read_u32(of_node, "qcom,n-fts", &pcie_dev->n_fts);
+ PCIE_DBG(pcie_dev, "n-fts: 0x%x.\n", pcie_dev->n_fts);
+
+ of_property_read_u32(of_node, "qcom,ep-latency",
+ &pcie_dev->ep_latency);
+ PCIE_DBG(pcie_dev, "RC%d: ep-latency: %ums.\n", pcie_dev->rc_idx,
+ pcie_dev->ep_latency);
+
+ of_property_read_u32(of_node, "qcom,switch-latency",
+ &pcie_dev->switch_latency);
+ PCIE_DBG(pcie_dev, "RC%d: switch-latency: %ums.\n", pcie_dev->rc_idx,
+ pcie_dev->switch_latency);
+
+ ret = of_property_read_u32(of_node, "qcom,wr-halt-size",
+ &pcie_dev->wr_halt_size);
+ if (ret)
+ PCIE_DBG(pcie_dev,
+			"RC%d: wr-halt-size not specified in DT; using default value.\n",
+ pcie_dev->rc_idx);
+ else
+ PCIE_DBG(pcie_dev, "RC%d: wr-halt-size: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->wr_halt_size);
+
+ pcie_dev->slv_addr_space_size = SZ_16M;
+ of_property_read_u32(of_node, "qcom,slv-addr-space-size",
+ &pcie_dev->slv_addr_space_size);
+ PCIE_DBG(pcie_dev, "RC%d: slv-addr-space-size: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->slv_addr_space_size);
+
+ of_property_read_u32(of_node, "qcom,phy-status-offset",
+ &pcie_dev->phy_status_offset);
+ PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,
+ pcie_dev->phy_status_offset);
+
+ of_property_read_u32(of_node, "qcom,phy-power-down-offset",
+ &pcie_dev->phy_power_down_offset);
+ PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->phy_power_down_offset);
+
+ of_property_read_u32(of_node, "qcom,cpl-timeout",
+ &pcie_dev->cpl_timeout);
+ PCIE_DBG(pcie_dev, "RC%d: cpl-timeout: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->cpl_timeout);
+
+ pcie_dev->perst_delay_us_min = PERST_PROPAGATION_DELAY_US_MIN;
+ pcie_dev->perst_delay_us_max = PERST_PROPAGATION_DELAY_US_MAX;
+ of_property_read_u32(of_node, "qcom,perst-delay-us-min",
+ &pcie_dev->perst_delay_us_min);
+ of_property_read_u32(of_node, "qcom,perst-delay-us-max",
+ &pcie_dev->perst_delay_us_max);
+ PCIE_DBG(pcie_dev,
+ "RC%d: perst-delay-us-min: %dus. perst-delay-us-max: %dus.\n",
+ pcie_dev->rc_idx, pcie_dev->perst_delay_us_min,
+ pcie_dev->perst_delay_us_max);
+
+ pcie_dev->tlp_rd_size = PCIE_TLP_RD_SIZE;
+ of_property_read_u32(of_node, "qcom,tlp-rd-size",
+ &pcie_dev->tlp_rd_size);
+ PCIE_DBG(pcie_dev, "RC%d: tlp-rd-size: 0x%x.\n", pcie_dev->rc_idx,
+ pcie_dev->tlp_rd_size);
+
+ pcie_dev->shadow_en = true;
+ pcie_dev->aer_enable = true;
+ if (msm_pcie_invert_aer_support)
+ pcie_dev->aer_enable = !pcie_dev->aer_enable;
+
+ memcpy(pcie_dev->vreg, msm_pcie_vreg_info, sizeof(msm_pcie_vreg_info));
+ memcpy(pcie_dev->gpio, msm_pcie_gpio_info, sizeof(msm_pcie_gpio_info));
+ memcpy(pcie_dev->clk, msm_pcie_clk_info[rc_idx],
+ sizeof(msm_pcie_clk_info[rc_idx]));
+ memcpy(pcie_dev->pipeclk, msm_pcie_pipe_clk_info[rc_idx],
+ sizeof(msm_pcie_pipe_clk_info[rc_idx]));
+ memcpy(pcie_dev->res, msm_pcie_res_info, sizeof(msm_pcie_res_info));
+ memcpy(pcie_dev->irq, msm_pcie_irq_info, sizeof(msm_pcie_irq_info));
+ memcpy(pcie_dev->reset, msm_pcie_reset_info[rc_idx],
+ sizeof(msm_pcie_reset_info[rc_idx]));
+ memcpy(pcie_dev->pipe_reset, msm_pcie_pipe_reset_info[rc_idx],
+ sizeof(msm_pcie_pipe_reset_info[rc_idx]));
+
+ for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
+ pcie_dev->rc_shadow[i] = PCIE_CLEAR;
+ for (i = 0; i < MAX_DEVICE_NUM; i++)
+ for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
+ pcie_dev->ep_shadow[i][j] = PCIE_CLEAR;
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ pcie_dev->pcidev_table[i].bdf = 0;
+ pcie_dev->pcidev_table[i].dev = NULL;
+ pcie_dev->pcidev_table[i].short_bdf = 0;
+ pcie_dev->pcidev_table[i].sid = 0;
+ pcie_dev->pcidev_table[i].domain = rc_idx;
+ pcie_dev->pcidev_table[i].conf_base = NULL;
+ pcie_dev->pcidev_table[i].phy_address = 0;
+ pcie_dev->pcidev_table[i].dev_ctrlstts_offset = 0;
+ pcie_dev->pcidev_table[i].event_reg = NULL;
+ pcie_dev->pcidev_table[i].registered = true;
+ }
+
+ dev_set_drvdata(&pdev->dev, pcie_dev);
+
+ ret = msm_pcie_get_resources(pcie_dev, pcie_dev->pdev);
+ if (ret)
+ goto decrease_rc_num;
+
+ if (pcie_dev->rumi)
+ pcie_dev->rumi_init = msm_pcie_rumi_init;
+
+ pcie_dev->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR_OR_NULL(pcie_dev->pinctrl))
+ PCIE_ERR(pcie_dev, "PCIe: RC%d failed to get pinctrl\n",
+ pcie_dev->rc_idx);
+ else
+ pcie_dev->use_pinctrl = true;
+
+ if (pcie_dev->use_pinctrl) {
+ pcie_dev->pins_default = pinctrl_lookup_state(pcie_dev->pinctrl,
+ "default");
+ if (IS_ERR(pcie_dev->pins_default)) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d could not get pinctrl default state\n",
+ pcie_dev->rc_idx);
+ pcie_dev->pins_default = NULL;
+ }
+
+ pcie_dev->pins_sleep = pinctrl_lookup_state(pcie_dev->pinctrl,
+ "sleep");
+ if (IS_ERR(pcie_dev->pins_sleep)) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d could not get pinctrl sleep state\n",
+ pcie_dev->rc_idx);
+ pcie_dev->pins_sleep = NULL;
+ }
+ }
+
+ ret = msm_pcie_gpio_init(pcie_dev);
+ if (ret) {
+ msm_pcie_release_resources(pcie_dev);
+ goto decrease_rc_num;
+ }
+
+ ret = msm_pcie_irq_init(pcie_dev);
+ if (ret) {
+ msm_pcie_release_resources(pcie_dev);
+ msm_pcie_gpio_deinit(pcie_dev);
+ goto decrease_rc_num;
+ }
+
+ msm_pcie_sysfs_init(pcie_dev);
+
+ pcie_dev->drv_ready = true;
+
+ if (pcie_dev->boot_option & MSM_PCIE_NO_PROBE_ENUMERATION) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d will be enumerated by client or endpoint.\n",
+ pcie_dev->rc_idx);
+ mutex_unlock(&pcie_drv.drv_lock);
+ return 0;
+ }
+
+ ret = msm_pcie_enumerate(rc_idx);
+ if (ret)
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
+ pcie_dev->rc_idx);
+ else
+ PCIE_ERR(pcie_dev, "RC%d is enabled in bootup\n",
+ pcie_dev->rc_idx);
+
+ PCIE_DBG(pcie_dev, "PCIe probed %s\n", dev_name(&pdev->dev));
+
+ mutex_unlock(&pcie_drv.drv_lock);
+ return 0;
+
+decrease_rc_num:
+ pcie_drv.rc_num--;
+ PCIE_ERR(pcie_dev, "PCIe: RC%d: Driver probe failed. ret: %d\n",
+ pcie_dev->rc_idx, ret);
+out:
+ if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
+ pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
+ rc_idx);
+
+ mutex_unlock(&pcie_drv.drv_lock);
+
+ return ret;
+}
+
+static int msm_pcie_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ int rc_idx;
+
+ PCIE_GEN_DBG("PCIe:%s.\n", __func__);
+
+ mutex_lock(&pcie_drv.drv_lock);
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &rc_idx);
+ if (ret) {
+ pr_err("%s: Did not find RC index.\n", __func__);
+ goto out;
+ } else {
+ pcie_drv.rc_num--;
+		PCIE_GEN_DBG("%s: RC index is 0x%x.\n", __func__, rc_idx);
+ }
+
+ msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
+ msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
+ msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
+ msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
+ msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+
+out:
+ mutex_unlock(&pcie_drv.drv_lock);
+
+ return ret;
+}
+
+static int msm_pci_iommu_parse_dt(struct msm_root_dev_t *root_dev)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev;
+ struct pci_dev *pci_dev = root_dev->pci_dev;
+ struct device_node *pci_of_node = pci_dev->dev.of_node;
+
+ ret = of_property_read_u32(pci_of_node, "qcom,iommu-cfg",
+ &root_dev->iommu_cfg);
+ if (ret) {
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: no iommu-cfg present in DT\n",
+ pcie_dev->rc_idx);
+ return 0;
+ }
+
+ if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) {
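+		/* stage 1 bypassed: only a minimal one-page mapping range is needed */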
+ root_dev->iommu_base = 0;
+ root_dev->iommu_size = PAGE_SIZE;
+ } else {
+ u64 iommu_range[2];
+
+ ret = of_property_count_elems_of_size(pci_of_node,
+ "qcom,iommu-range",
+ sizeof(iommu_range));
+ if (ret != 1) {
+ PCIE_ERR(pcie_dev,
+ "invalid entry for iommu address: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = of_property_read_u64_array(pci_of_node,
+ "qcom,iommu-range",
+ iommu_range, 2);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "failed to get iommu address: %d\n", ret);
+ return ret;
+ }
+
+ root_dev->iommu_base = (dma_addr_t)iommu_range[0];
+ root_dev->iommu_size = (size_t)iommu_range[1];
+ }
+
+ PCIE_DBG(pcie_dev,
+ "iommu-cfg: 0x%x iommu-base: %pad iommu-size: 0x%zx\n",
+ root_dev->iommu_cfg, &root_dev->iommu_base,
+ root_dev->iommu_size);
+
+ return 0;
+}
+
+static int msm_pci_iommu_init(struct msm_root_dev_t *root_dev)
+{
+ int ret;
+ struct dma_iommu_mapping *mapping;
+ struct msm_pcie_dev_t *pcie_dev = root_dev->pcie_dev;
+ struct pci_dev *pci_dev = root_dev->pci_dev;
+
+ ret = msm_pci_iommu_parse_dt(root_dev);
+ if (ret)
+ return ret;
+
+ if (!(root_dev->iommu_cfg & MSM_PCIE_IOMMU_PRESENT))
+ return 0;
+
+ mapping = __depr_arm_iommu_create_mapping(&pci_bus_type,
+ root_dev->iommu_base,
+ root_dev->iommu_size);
+ if (IS_ERR_OR_NULL(mapping)) {
+ ret = PTR_ERR(mapping);
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: Failed to create IOMMU mapping (%d)\n",
+ pcie_dev->rc_idx, ret);
+ return ret;
+ }
+
+ if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_S1_BYPASS) {
+ int iommu_s1_bypass = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &iommu_s1_bypass);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: failed to set attribute S1_BYPASS: %d\n",
+ pcie_dev->rc_idx, ret);
+ goto release_mapping;
+ }
+ }
+
+ if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FAST) {
+ int iommu_fast = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &iommu_fast);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: failed to set attribute FAST: %d\n",
+ pcie_dev->rc_idx, ret);
+ goto release_mapping;
+ }
+ }
+
+ if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_ATOMIC) {
+ int iommu_atomic = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &iommu_atomic);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: failed to set attribute ATOMIC: %d\n",
+ pcie_dev->rc_idx, ret);
+ goto release_mapping;
+ }
+ }
+
+ if (root_dev->iommu_cfg & MSM_PCIE_IOMMU_FORCE_COHERENT) {
+ int iommu_force_coherent = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ &iommu_force_coherent);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: failed to set attribute FORCE_COHERENT: %d\n",
+ pcie_dev->rc_idx, ret);
+ goto release_mapping;
+ }
+ }
+
+ ret = __depr_arm_iommu_attach_device(&pci_dev->dev, mapping);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: failed to attach IOMMU for device: %d\n",
+ pcie_dev->rc_idx, ret);
+ goto release_mapping;
+ }
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: successful iommu attach\n",
+ pcie_dev->rc_idx);
+ return 0;
+
+release_mapping:
+ __depr_arm_iommu_release_mapping(mapping);
+
+ return ret;
+}
+
+int msm_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *device_id)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus);
+ struct msm_root_dev_t *root_dev;
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI Probe\n", pcie_dev->rc_idx);
+
+ if (!pci_dev->dev.of_node)
+ return -ENODEV;
+
+ root_dev = devm_kzalloc(&pci_dev->dev, sizeof(*root_dev), GFP_KERNEL);
+ if (!root_dev)
+ return -ENOMEM;
+
+ root_dev->pcie_dev = pcie_dev;
+ root_dev->pci_dev = pci_dev;
+ dev_set_drvdata(&pci_dev->dev, root_dev);
+
+ ret = msm_pci_iommu_init(root_dev);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ PCIE_ERR(pcie_dev, "DMA set mask failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct pci_device_id msm_pci_device_id[] = {
+ {PCI_DEVICE(0x17cb, 0x0108)},
+ {0},
+};
+
+static struct pci_driver msm_pci_driver = {
+ .name = "pci-msm-rc",
+ .id_table = msm_pci_device_id,
+ .probe = msm_pci_probe,
+};
+
+static const struct of_device_id msm_pcie_match[] = {
+ { .compatible = "qcom,pci-msm",
+ },
+ {}
+};
+
+static struct platform_driver msm_pcie_driver = {
+ .probe = msm_pcie_probe,
+ .remove = msm_pcie_remove,
+ .driver = {
+ .name = "pci-msm",
+ .of_match_table = msm_pcie_match,
+ },
+};
+
+static int __init pcie_init(void)
+{
+ int ret = 0, i;
+ char rc_name[MAX_RC_NAME_LEN];
+
+ pr_alert("pcie:%s.\n", __func__);
+
+ pcie_drv.rc_num = 0;
+ mutex_init(&pcie_drv.drv_lock);
+
+ for (i = 0; i < MAX_RC_NUM; i++) {
+ snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
+ msm_pcie_dev[i].ipc_log =
+ ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+ if (msm_pcie_dev[i].ipc_log == NULL)
+ pr_err("%s: unable to create IPC log context for %s\n",
+ __func__, rc_name);
+ else
+ PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging is enabled for RC%d\n",
+ i);
+ snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
+ msm_pcie_dev[i].ipc_log_long =
+ ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+ if (msm_pcie_dev[i].ipc_log_long == NULL)
+ pr_err("%s: unable to create IPC log context for %s\n",
+ __func__, rc_name);
+ else
+ PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+ rc_name, i);
+ snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
+ msm_pcie_dev[i].ipc_log_dump =
+ ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+ if (msm_pcie_dev[i].ipc_log_dump == NULL)
+ pr_err("%s: unable to create IPC log context for %s\n",
+ __func__, rc_name);
+ else
+ PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+ rc_name, i);
+ spin_lock_init(&msm_pcie_dev[i].cfg_lock);
+ msm_pcie_dev[i].cfg_access = true;
+ mutex_init(&msm_pcie_dev[i].enumerate_lock);
+ mutex_init(&msm_pcie_dev[i].setup_lock);
+ mutex_init(&msm_pcie_dev[i].recovery_lock);
+ spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
+ spin_lock_init(&msm_pcie_dev[i].irq_lock);
+ msm_pcie_dev[i].drv_ready = false;
+ }
+ for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
+ msm_pcie_dev_tbl[i].bdf = 0;
+ msm_pcie_dev_tbl[i].dev = NULL;
+ msm_pcie_dev_tbl[i].short_bdf = 0;
+ msm_pcie_dev_tbl[i].sid = 0;
+ msm_pcie_dev_tbl[i].domain = -1;
+ msm_pcie_dev_tbl[i].conf_base = NULL;
+ msm_pcie_dev_tbl[i].phy_address = 0;
+ msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
+ msm_pcie_dev_tbl[i].event_reg = NULL;
+ msm_pcie_dev_tbl[i].registered = true;
+ }
+
+ crc8_populate_msb(msm_pcie_crc8_table, MSM_PCIE_CRC8_POLYNOMIAL);
+
+ msm_pcie_debugfs_init();
+
+ ret = pci_register_driver(&msm_pci_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&msm_pcie_driver);
+
+ return ret;
+}
+
+static void __exit pcie_exit(void)
+{
+ int i;
+
+ PCIE_GEN_DBG("pcie:%s.\n", __func__);
+
+ platform_driver_unregister(&msm_pcie_driver);
+
+ msm_pcie_debugfs_exit();
+
+ for (i = 0; i < MAX_RC_NUM; i++)
+ msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
+}
+
+subsys_initcall_sync(pcie_init);
+module_exit(pcie_exit);
+
+
+/* The RC does not report the correct class; set it to PCI_CLASS_BRIDGE_PCI */
+static void msm_pcie_fixup_early(struct pci_dev *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
+ if (pci_is_root_bus(dev->bus))
+ dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
+}
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
+ msm_pcie_fixup_early);
+
+/* Suspend the PCIe link */
+static int msm_pcie_pm_suspend(struct pci_dev *dev,
+ void *user, void *data, u32 options)
+{
+ int ret = 0;
+ u32 val = 0;
+ int ret_l23;
+ unsigned long irqsave_flags;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+ spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
+ pcie_dev->suspending = true;
+ spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);
+
+ if (!pcie_dev->power_on) {
+ PCIE_DBG(pcie_dev,
+ "PCIe: power of RC%d has been turned off.\n",
+ pcie_dev->rc_idx);
+ return ret;
+ }
+
+ if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
+ && msm_pcie_confirm_linkup(pcie_dev, true, true,
+ pcie_dev->conf)) {
+ ret = pci_save_state(dev);
+ pcie_dev->saved_state = pci_store_saved_state(dev);
+ }
+ if (ret) {
+		PCIE_ERR(pcie_dev, "PCIe: failed to save state of RC%d: %d.\n",
+ pcie_dev->rc_idx, ret);
+ pcie_dev->suspending = false;
+ return ret;
+ }
+
+ spin_lock_irqsave(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+ pcie_dev->cfg_access = false;
+ spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+
+ msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
+ BIT(4));
+
+ PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
+ pcie_dev->rc_idx);
+
+ ret_l23 = readl_poll_timeout((pcie_dev->parf
+ + PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);
+
+ /* check L23_Ready */
+ PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
+ pcie_dev->rc_idx,
+ readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
+ if (!ret_l23)
+ PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
+ pcie_dev->rc_idx);
+ else
+ PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
+ pcie_dev->rc_idx);
+
+ if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
+ pinctrl_select_state(pcie_dev->pinctrl,
+ pcie_dev->pins_sleep);
+
+ msm_pcie_disable(pcie_dev);
+
+ PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+ return ret;
+}
+
+static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+ if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED ||
+ !pci_is_root_bus(dev->bus))
+ return;
+
+ spin_lock_irqsave(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+ if (pcie_dev->disable_pc) {
+ PCIE_DBG(pcie_dev,
+ "RC%d: Skip suspend because of user request\n",
+ pcie_dev->rc_idx);
+ spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+ return;
+ }
+ spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+
+ mutex_lock(&pcie_dev->recovery_lock);
+
+ ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
+ if (ret)
+ PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
+ pcie_dev->rc_idx, ret);
+
+ mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
+ msm_pcie_fixup_suspend);
+
+/* Resume the PCIe link */
+static int msm_pcie_pm_resume(struct pci_dev *dev,
+ void *user, void *data, u32 options)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+ if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
+ pinctrl_select_state(pcie_dev->pinctrl,
+ pcie_dev->pins_default);
+
+ spin_lock_irqsave(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+ pcie_dev->cfg_access = true;
+ spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+ pcie_dev->irqsave_flags);
+
+ ret = msm_pcie_enable(pcie_dev);
+ if (ret) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d fail to enable PCIe link in resume.\n",
+ pcie_dev->rc_idx);
+ return ret;
+ }
+
+ pcie_dev->suspending = false;
+ PCIE_DBG(pcie_dev,
+ "dev->bus->number = %d dev->bus->primary = %d\n",
+ dev->bus->number, dev->bus->primary);
+
+ if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
+ if (pcie_dev->saved_state) {
+ PCIE_DBG(pcie_dev,
+ "RC%d: entry of PCI framework restore state\n",
+ pcie_dev->rc_idx);
+
+ pci_load_and_free_saved_state(dev,
+ &pcie_dev->saved_state);
+ pci_restore_state(dev);
+
+ PCIE_DBG(pcie_dev,
+ "RC%d: exit of PCI framework restore state\n",
+ pcie_dev->rc_idx);
+ } else {
+ PCIE_DBG(pcie_dev,
+ "RC%d: restore rc config space using shadow recovery\n",
+ pcie_dev->rc_idx);
+ msm_pcie_cfg_recover(pcie_dev, true);
+ }
+ }
+
+ if (pcie_dev->bridge_found) {
+ PCIE_DBG(pcie_dev,
+ "RC%d: entry of PCIe recover config\n",
+ pcie_dev->rc_idx);
+
+ msm_pcie_recover_config(dev);
+
+ PCIE_DBG(pcie_dev,
+ "RC%d: exit of PCIe recover config\n",
+ pcie_dev->rc_idx);
+ }
+
+ PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+ return ret;
+}
+
+static void msm_pcie_fixup_resume(struct pci_dev *dev)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+ if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+ pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
+ return;
+
+ mutex_lock(&pcie_dev->recovery_lock);
+ ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+ if (ret)
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d got failure in fixup resume:%d.\n",
+ pcie_dev->rc_idx, ret);
+
+ mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
+ msm_pcie_fixup_resume);
+
+static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
+{
+ int ret;
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+ if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+ pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
+ return;
+
+ mutex_lock(&pcie_dev->recovery_lock);
+ ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+ if (ret)
+ PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
+ pcie_dev->rc_idx, ret);
+
+ mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
+ msm_pcie_fixup_resume_early);
+
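+/**
+ * msm_pcie_pm_control - perform a power management operation on a PCIe link
+ * @pm_opt: requested operation: suspend, resume, or disable/enable power
+ *          collapse of the link
+ * @busnr: bus number of the endpoint; 0 skips the bus number check
+ * @user: pci_dev of the requesting endpoint
+ * @data: opaque data passed through to the suspend/resume handlers
+ * @options: MSM_PCIE_CONFIG_* flags such as MSM_PCIE_CONFIG_NO_CFG_RESTORE
+ *
+ * Return: 0 on success, -EPROBE_DEFER if the RC has not been probed yet,
+ * or a negative errno/MSM_PCIE_ERROR on failure.
+ */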
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+ void *data, u32 options)
+{
+ int i, ret = 0;
+ struct pci_dev *dev;
+ u32 rc_idx = 0;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
+ pm_opt, busnr, options);
+
+ if (!user) {
+ pr_err("PCIe: endpoint device is NULL\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);
+
+ if (pcie_dev) {
+ rc_idx = pcie_dev->rc_idx;
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
+ rc_idx, pm_opt, busnr, options);
+ } else {
+		pr_err("PCIe: did not find RC for pci endpoint device.\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (!busnr)
+ break;
+ if (user == pcie_dev->pcidev_table[i].dev) {
+ if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
+ break;
+
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: bus number %d does not match with the expected value %d\n",
+ pcie_dev->rc_idx, busnr,
+ pcie_dev->pcidev_table[i].bdf >> 24);
+ ret = MSM_PCIE_ERROR;
+ goto out;
+ }
+ }
+
+ if (i == MAX_DEVICE_NUM) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: endpoint device was not found in device table",
+ pcie_dev->rc_idx);
+ ret = MSM_PCIE_ERROR;
+ goto out;
+ }
+
+ dev = msm_pcie_dev[rc_idx].dev;
+
+ if (!msm_pcie_dev[rc_idx].drv_ready) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "RC%d has not been successfully probed yet\n",
+ rc_idx);
+ return -EPROBE_DEFER;
+ }
+
+ switch (pm_opt) {
+ case MSM_PCIE_SUSPEND:
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "User of RC%d requests to suspend the link\n", rc_idx);
+ if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
+ rc_idx, msm_pcie_dev[rc_idx].link_status);
+
+ if (!msm_pcie_dev[rc_idx].power_on) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
+ rc_idx, msm_pcie_dev[rc_idx].link_status);
+ break;
+ }
+
+ if (msm_pcie_dev[rc_idx].pending_ep_reg) {
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: request to suspend the link is rejected\n",
+ rc_idx);
+ break;
+ }
+
+ if (pcie_dev->num_active_ep) {
+ PCIE_DBG(pcie_dev,
+ "RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
+ pcie_dev->rc_idx, pcie_dev->num_active_ep);
+ return ret;
+ }
+
+ msm_pcie_dev[rc_idx].user_suspend = true;
+
+ mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+ ret = msm_pcie_pm_suspend(dev, user, data, options);
+ if (ret) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: user failed to suspend the link.\n",
+ rc_idx);
+ msm_pcie_dev[rc_idx].user_suspend = false;
+ }
+
+ mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+ break;
+ case MSM_PCIE_RESUME:
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "User of RC%d requests to resume the link\n", rc_idx);
+ if (msm_pcie_dev[rc_idx].link_status !=
+ MSM_PCIE_LINK_DISABLED) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
+ rc_idx, msm_pcie_dev[rc_idx].link_status,
+ msm_pcie_dev[rc_idx].num_active_ep);
+ break;
+ }
+
+ mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+ ret = msm_pcie_pm_resume(dev, user, data, options);
+ if (ret) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: user failed to resume the link.\n",
+ rc_idx);
+ } else {
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: user succeeded to resume the link.\n",
+ rc_idx);
+
+ msm_pcie_dev[rc_idx].user_suspend = false;
+ }
+
+ mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+ break;
+ case MSM_PCIE_DISABLE_PC:
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "User of RC%d requests to keep the link always alive.\n",
+ rc_idx);
+ spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+ msm_pcie_dev[rc_idx].irqsave_flags);
+ if (msm_pcie_dev[rc_idx].suspending) {
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d Link has been suspended before request\n",
+ rc_idx);
+ ret = MSM_PCIE_ERROR;
+ } else {
+ msm_pcie_dev[rc_idx].disable_pc = true;
+ }
+ spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+ msm_pcie_dev[rc_idx].irqsave_flags);
+ break;
+ case MSM_PCIE_ENABLE_PC:
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "User of RC%d cancels the request of alive link.\n",
+ rc_idx);
+ spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+ msm_pcie_dev[rc_idx].irqsave_flags);
+ msm_pcie_dev[rc_idx].disable_pc = false;
+ spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+ msm_pcie_dev[rc_idx].irqsave_flags);
+ break;
+ default:
+ PCIE_ERR(&msm_pcie_dev[rc_idx],
+ "PCIe: RC%d: unsupported pm operation:%d.\n",
+ rc_idx, pm_opt);
+ ret = -ENODEV;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_pm_control);
+
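+/**
+ * msm_pcie_register_event - register an endpoint client for PCIe link events
+ * @reg: event registration data; reg->user must point to the client's pci_dev
+ *
+ * Return: 0 on success, -ENODEV if the registration or its user is invalid
+ * or no RC is associated with the endpoint.
+ */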
+int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+ int i, ret = 0;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ if (!reg) {
+ pr_err("PCIe: Event registration is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!reg->user) {
+ pr_err("PCIe: User of event registration is NULL\n");
+ return -ENODEV;
+ }
+
+ pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+ if (!pcie_dev) {
+ PCIE_ERR(pcie_dev, "%s",
+ "PCIe: did not find RC for pci endpoint device.\n");
+ return -ENODEV;
+ }
+
+ if (pcie_dev->num_ep > 1) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (reg->user ==
+ pcie_dev->pcidev_table[i].dev) {
+ pcie_dev->event_reg =
+ pcie_dev->pcidev_table[i].event_reg;
+
+ if (!pcie_dev->event_reg) {
+ pcie_dev->pcidev_table[i].registered =
+ true;
+
+ pcie_dev->num_active_ep++;
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: number of active EP(s): %d.\n",
+ pcie_dev->rc_idx,
+ pcie_dev->num_active_ep);
+ }
+
+ pcie_dev->event_reg = reg;
+ pcie_dev->pcidev_table[i].event_reg = reg;
+ PCIE_DBG(pcie_dev,
+ "Event 0x%x is registered for RC %d\n",
+ reg->events,
+ pcie_dev->rc_idx);
+
+ break;
+ }
+ }
+
+ if (pcie_dev->pending_ep_reg) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++)
+ if (!pcie_dev->pcidev_table[i].registered)
+ break;
+
+ if (i == MAX_DEVICE_NUM)
+ pcie_dev->pending_ep_reg = false;
+ }
+ } else {
+ pcie_dev->event_reg = reg;
+ PCIE_DBG(pcie_dev,
+ "Event 0x%x is registered for RC %d\n", reg->events,
+ pcie_dev->rc_idx);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_register_event);
+
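+/**
+ * msm_pcie_deregister_event - deregister an endpoint client's event handler
+ * @reg: the registration previously passed to msm_pcie_register_event()
+ *
+ * Return: 0 on success, -ENODEV if the registration or its user is invalid
+ * or no RC is associated with the endpoint.
+ */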
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+ int i, ret = 0;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ if (!reg) {
+ pr_err("PCIe: Event deregistration is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!reg->user) {
+ pr_err("PCIe: User of event deregistration is NULL\n");
+ return -ENODEV;
+ }
+
+ pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+ if (!pcie_dev) {
+ PCIE_ERR(pcie_dev, "%s",
+ "PCIe: did not find RC for pci endpoint device.\n");
+ return -ENODEV;
+ }
+
+ if (pcie_dev->num_ep > 1) {
+ for (i = 0; i < MAX_DEVICE_NUM; i++) {
+ if (reg->user == pcie_dev->pcidev_table[i].dev) {
+ if (pcie_dev->pcidev_table[i].event_reg) {
+ pcie_dev->num_active_ep--;
+ PCIE_DBG(pcie_dev,
+ "PCIe: RC%d: number of active EP(s) left: %d.\n",
+ pcie_dev->rc_idx,
+ pcie_dev->num_active_ep);
+ }
+
+ pcie_dev->event_reg = NULL;
+ pcie_dev->pcidev_table[i].event_reg = NULL;
+ PCIE_DBG(pcie_dev,
+ "Event is deregistered for RC %d\n",
+ pcie_dev->rc_idx);
+
+ break;
+ }
+ }
+ } else {
+ pcie_dev->event_reg = NULL;
+ PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
+ pcie_dev->rc_idx);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_deregister_event);
+
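+/**
+ * msm_pcie_recover_config - restore the config space of an RC and its EP
+ * @dev: pci_dev of the endpoint whose root complex needs recovery
+ *
+ * Restores the RC and EP config space from the driver's register shadow and
+ * refreshes the state saved by the PCI framework.
+ *
+ * Return: 0 on success, -ENODEV if @dev is NULL or the link is not up.
+ */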
+int msm_pcie_recover_config(struct pci_dev *dev)
+{
+ int ret = 0;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ if (dev) {
+ pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+ PCIE_DBG(pcie_dev,
+ "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
+ } else {
+ pr_err("PCIe: the input pci dev is NULL.\n");
+ return -ENODEV;
+ }
+
+ if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
+ PCIE_DBG(pcie_dev,
+ "Recover config space of RC%d and its EP\n",
+ pcie_dev->rc_idx);
+ pcie_dev->shadow_en = false;
+ PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
+ msm_pcie_cfg_recover(pcie_dev, true);
+ PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
+ msm_pcie_cfg_recover(pcie_dev, false);
+ PCIE_DBG(pcie_dev,
+ "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
+ pcie_dev->rc_idx);
+ pci_save_state(pcie_dev->dev);
+ pci_save_state(dev);
+ pcie_dev->shadow_en = true;
+ PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
+ pcie_dev->rc_idx);
+ } else {
+ PCIE_ERR(pcie_dev,
+ "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
+ pcie_dev->rc_idx);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_recover_config);
+
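+/**
+ * msm_pcie_shadow_control - enable or disable config space shadowing
+ * @dev: pci_dev of a device on the RC whose shadowing is being changed
+ * @enable: true to turn shadowing on, false to turn it off
+ *
+ * Return: 0 on success, -ENODEV if @dev is NULL.
+ */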
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+ int ret = 0;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ if (dev) {
+ pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+ PCIE_DBG(pcie_dev,
+ "User requests to %s shadow\n",
+ enable ? "enable" : "disable");
+ } else {
+ pr_err("PCIe: the input pci dev is NULL.\n");
+ return -ENODEV;
+ }
+
+	PCIE_DBG(pcie_dev,
+		"The shadowing of RC%d is currently %s.\n",
+		pcie_dev->rc_idx, pcie_dev->shadow_en ? "enabled" : "disabled");
+
+ pcie_dev->shadow_en = enable;
+
+ PCIE_DBG(pcie_dev,
+ "Shadowing of RC%d is turned %s upon user's request.\n",
+ pcie_dev->rc_idx, enable ? "on" : "off");
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_shadow_control);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index ff6dab0..6c803ea 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -645,4 +645,6 @@
what is connected to USB PD ports from the EC and converts
that into power_supply properties.
+source "drivers/power/supply/qcom/Kconfig"
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a26b402..31305eb 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -85,3 +85,4 @@
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
+obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
new file mode 100644
index 0000000..2263c11
--- /dev/null
+++ b/drivers/power/supply/qcom/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Qualcomm Technologies, Inc. Charger and Fuel Gauge support"
+
+config QPNP_SMB5
+ tristate "SMB5 Battery Charger"
+ depends on MFD_SPMI_PMIC
+ help
+	  Say Y here to enable support for the SMB5 charging peripheral.
+ The QPNP SMB5 charger driver supports the charger peripheral
+ present in the chip.
+ The power supply framework is used to communicate battery and
+ usb properties to userspace and other driver consumers such
+ as fuel gauge, USB, and USB-PD.
+	  VBUS and VCONN regulators are registered to support OTG and
+	  powered Type-C cables, respectively.
+
+endmenu
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
new file mode 100644
index 0000000..8901a9c
--- /dev/null
+++ b/drivers/power/supply/qcom/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_QPNP_SMB5) += step-chg-jeita.o battery.o qpnp-smb5.o smb5-lib.o pmic-voter.o storm-watch.o schgm-flash.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
new file mode 100644
index 0000000..2241619
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.c
@@ -0,0 +1,1755 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/printk.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include <linux/workqueue.h>
+#include "battery.h"
+
+#define DRV_MAJOR_VERSION 1
+#define DRV_MINOR_VERSION 0
+
+#define CHG_STATE_VOTER "CHG_STATE_VOTER"
+#define TAPER_STEPPER_VOTER "TAPER_STEPPER_VOTER"
+#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define PL_TAPER_EARLY_BAD_VOTER "PL_TAPER_EARLY_BAD_VOTER"
+#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER"
+#define PL_HW_ABSENT_VOTER "PL_HW_ABSENT_VOTER"
+#define PL_VOTER "PL_VOTER"
+#define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER"
+#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define PL_FCC_LOW_VOTER "PL_FCC_LOW_VOTER"
+#define ICL_LIMIT_VOTER "ICL_LIMIT_VOTER"
+#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER"
+
+struct pl_data {
+ int pl_mode;
+ int pl_batfet_mode;
+ int pl_min_icl_ua;
+ int slave_pct;
+ int slave_fcc_ua;
+ int main_fcc_ua;
+ int restricted_current;
+ bool restricted_charging_enabled;
+ struct votable *fcc_votable;
+ struct votable *fv_votable;
+ struct votable *pl_disable_votable;
+ struct votable *pl_awake_votable;
+ struct votable *hvdcp_hw_inov_dis_votable;
+ struct votable *usb_icl_votable;
+ struct votable *pl_enable_votable_indirect;
+ struct delayed_work status_change_work;
+ struct work_struct pl_disable_forever_work;
+ struct work_struct pl_taper_work;
+ struct delayed_work pl_awake_work;
+ struct delayed_work fcc_stepper_work;
+ bool taper_work_running;
+ struct power_supply *main_psy;
+ struct power_supply *pl_psy;
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ int charge_type;
+ int total_settled_ua;
+ int pl_settled_ua;
+ int pl_fcc_max;
+ int fcc_stepper_enable;
+ int main_step_fcc_dir;
+ int main_step_fcc_count;
+ int main_step_fcc_residual;
+ int parallel_step_fcc_dir;
+ int parallel_step_fcc_count;
+ int parallel_step_fcc_residual;
+ int step_fcc;
+ u32 wa_flags;
+ struct class qcom_batt_class;
+ struct wakeup_source *pl_ws;
+ struct notifier_block nb;
+ bool pl_disable;
+ int taper_entry_fv;
+ int main_fcc_max;
+ /* debugfs directory */
+ struct dentry *dfs_root;
+};
+
+struct pl_data *the_chip;
+
+enum print_reason {
+ PR_PARALLEL = BIT(0),
+};
+
+enum {
+ AICL_RERUN_WA_BIT = BIT(0),
+ FORCE_INOV_DISABLE_BIT = BIT(1),
+};
+
+static int debug_mask;
+
+#define pl_dbg(chip, reason, fmt, ...) \
+ do { \
+ if (debug_mask & (reason)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define IS_USBIN(mode) ((mode == POWER_SUPPLY_PL_USBIN_USBIN) \
+ || (mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+enum {
+ VER = 0,
+ SLAVE_PCT,
+ RESTRICT_CHG_ENABLE,
+ RESTRICT_CHG_CURRENT,
+ FCC_STEPPING_IN_PROGRESS,
+};
+
+/*******
+ * ICL *
+ ********/
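+/*
+ * Compute how the settled input current (AICL result plus any client ICL
+ * votes) should be split between the main and the parallel charger, with
+ * the parallel charger taking slave_pct percent of the total.
+ */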
+static int get_settled_split(struct pl_data *chip, int *main_icl_ua,
+ int *slave_icl_ua, int *total_settled_icl_ua)
+{
+ int slave_icl_pct, total_current_ua;
+ int slave_ua = 0, main_settled_ua = 0;
+ union power_supply_propval pval = {0, };
+ int rc, total_settled_ua = 0;
+
+ if (!IS_USBIN(chip->pl_mode))
+ return -EINVAL;
+
+ if (!chip->main_psy)
+ return -EINVAL;
+
+ if (!get_effective_result_locked(chip->pl_disable_votable)) {
+ /* read the aicl settled value */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return rc;
+ }
+ main_settled_ua = pval.intval;
+ slave_icl_pct = max(0, chip->slave_pct);
+ slave_ua = ((main_settled_ua + chip->pl_settled_ua)
+ * slave_icl_pct) / 100;
+ total_settled_ua = main_settled_ua + chip->pl_settled_ua;
+ }
+
+ total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+ if (total_current_ua < 0) {
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy) {
+ pr_err("Couldn't get usbpsy while splitting settled\n");
+ return -ENOENT;
+ }
+ /* no client is voting, so get the total current from charger */
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get max current rc=%d\n", rc);
+ return rc;
+ }
+ total_current_ua = pval.intval;
+ }
+
+ *main_icl_ua = total_current_ua - slave_ua;
+ *slave_icl_ua = slave_ua;
+ *total_settled_icl_ua = total_settled_ua;
+
+ pl_dbg(chip, PR_PARALLEL,
+ "Split total_current_ua=%d total_settled_ua=%d main_settled_ua=%d slave_ua=%d\n",
+ total_current_ua, total_settled_ua, main_settled_ua, slave_ua);
+
+ return 0;
+}
+
+static int validate_parallel_icl(struct pl_data *chip, bool *disable)
+{
+ int rc = 0;
+ int main_ua = 0, slave_ua = 0, total_settled_ua = 0;
+
+ if (!IS_USBIN(chip->pl_mode)
+ || get_effective_result_locked(chip->pl_disable_votable))
+ return 0;
+
+ rc = get_settled_split(chip, &main_ua, &slave_ua, &total_settled_ua);
+ if (rc < 0) {
+ pr_err("Couldn't get split current rc=%d\n", rc);
+ return rc;
+ }
+
+ if (slave_ua < chip->pl_min_icl_ua)
+ *disable = true;
+ else
+ *disable = false;
+
+ return 0;
+}
+
+static void split_settled(struct pl_data *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc, main_ua, slave_ua, total_settled_ua;
+
+ rc = get_settled_split(chip, &main_ua, &slave_ua, &total_settled_ua);
+ if (rc < 0) {
+ pr_err("Couldn't get split current rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * If there is an increase in slave share
+ * (Also handles parallel enable case)
+ * Set Main ICL then slave ICL
+ * else
+ * (Also handles parallel disable case)
+ * Set slave ICL then main ICL.
+ */
+ if (slave_ua > chip->pl_settled_ua) {
+ pval.intval = main_ua;
+ /* Set ICL on main charger */
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ return;
+ }
+
+		/* set the parallel ICL; it may be 0 mA when pl is disabled */
+ pval.intval = slave_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+ return;
+ }
+ } else {
+		/* set the parallel ICL; it may be 0 mA when pl is disabled */
+ pval.intval = slave_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+ return;
+ }
+
+ pval.intval = main_ua;
+ /* Set ICL on main charger */
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ return;
+ }
+ }
+
+ chip->total_settled_ua = total_settled_ua;
+ chip->pl_settled_ua = slave_ua;
+}
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+ DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+static CLASS_ATTR_RO(version);
+
+/*************
+ * SLAVE PCT *
+ **************/
+static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
+ char *ubuf)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->slave_pct);
+}
+
+static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
+ const char *ubuf, size_t count)
+{
+ struct pl_data *chip = container_of(c, struct pl_data, qcom_batt_class);
+ int rc;
+ unsigned long val;
+ bool disable = false;
+
+ if (kstrtoul(ubuf, 10, &val))
+ return -EINVAL;
+
+ chip->slave_pct = val;
+
+ rc = validate_parallel_icl(chip, &disable);
+ if (rc < 0)
+ return rc;
+
+ vote(chip->pl_disable_votable, ICL_LIMIT_VOTER, disable, 0);
+ rerun_election(chip->fcc_votable);
+ rerun_election(chip->fv_votable);
+ split_settled(chip);
+
+ return count;
+}
+static struct class_attribute class_attr_slave_pct =
+ __ATTR(parallel_pct, 0644, slave_pct_show, slave_pct_store);
+
+/***********************
+ * RESTRICTED CHARGING *
+ ***********************/
+static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr,
+ char *ubuf)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n",
+ chip->restricted_charging_enabled);
+}
+
+static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr,
+ const char *ubuf, size_t count)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+ unsigned long val;
+
+ if (kstrtoul(ubuf, 10, &val))
+ return -EINVAL;
+
+ if (chip->restricted_charging_enabled == !!val)
+ goto no_change;
+
+ chip->restricted_charging_enabled = !!val;
+
+ /* disable parallel charger in case of restricted charging */
+ vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER,
+ chip->restricted_charging_enabled, 0);
+
+ vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+ chip->restricted_charging_enabled,
+ chip->restricted_current);
+
+no_change:
+ return count;
+}
+static CLASS_ATTR_RW(restrict_chg);
+
+static ssize_t restrict_cur_show(struct class *c, struct class_attribute *attr,
+ char *ubuf)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->restricted_current);
+}
+
+static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr,
+ const char *ubuf, size_t count)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+ unsigned long val;
+
+ if (kstrtoul(ubuf, 10, &val))
+ return -EINVAL;
+
+ chip->restricted_current = val;
+
+ vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+ chip->restricted_charging_enabled,
+ chip->restricted_current);
+
+ return count;
+}
+static CLASS_ATTR_RW(restrict_cur);
+
+/****************************
+ * FCC STEPPING IN PROGRESS *
+ ****************************/
+static ssize_t fcc_stepping_in_progress_show(struct class *c,
+ struct class_attribute *attr, char *ubuf)
+{
+ struct pl_data *chip = container_of(c, struct pl_data,
+ qcom_batt_class);
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->step_fcc);
+}
+static CLASS_ATTR_RO(fcc_stepping_in_progress);
+
+static struct attribute *batt_class_attrs[] = {
+ [VER] = &class_attr_version.attr,
+ [SLAVE_PCT] = &class_attr_slave_pct.attr,
+ [RESTRICT_CHG_ENABLE] = &class_attr_restrict_chg.attr,
+ [RESTRICT_CHG_CURRENT] = &class_attr_restrict_cur.attr,
+ [FCC_STEPPING_IN_PROGRESS]
+ = &class_attr_fcc_stepping_in_progress.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(batt_class);
+
+/*********
+ * FCC *
+ **********/
+#define EFFICIENCY_PCT 80
+#define FCC_STEP_SIZE_UA 100000
+#define FCC_STEP_UPDATE_DELAY_MS 1000
+#define STEP_UP 1
+#define STEP_DOWN -1
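+/*
+ * Split the total requested FCC between the main (master) and parallel
+ * (slave) chargers: the slave takes slave_pct percent of the total, capped
+ * by its maximum FCC and, for USBMID-USBMID configurations, by a battery
+ * current limit derived from the settled input power.
+ */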
+static void get_fcc_split(struct pl_data *chip, int total_ua,
+ int *master_ua, int *slave_ua)
+{
+ int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
+ icl_ua, adapter_uv, bcl_ua;
+ union power_supply_propval pval = {0, };
+
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_FCC_DELTA, &pval);
+ if (rc < 0)
+ hw_cc_delta_ua = 0;
+ else
+ hw_cc_delta_ua = pval.intval;
+
+ bcl_ua = INT_MAX;
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ icl_ua = pval.intval;
+
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get adaptive voltage rc=%d\n", rc);
+ return;
+ }
+ adapter_uv = pval.intval;
+
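+		/*
+		 * Battery-side current limit derived from the input power:
+		 * bcl_ua = (icl_ua * adapter_uv * EFFICIENCY_PCT) /
+		 *          (float_voltage_uv * 100)
+		 */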
+ bcl_ua = div64_s64((s64)icl_ua * adapter_uv * EFFICIENCY_PCT,
+ (s64)get_effective_result(chip->fv_votable) * 100);
+ }
+
+ effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+ slave_limited_ua = min(effective_total_ua, bcl_ua);
+ *slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
+ *slave_ua = min(*slave_ua, chip->pl_fcc_max);
+
+	/*
+	 * In a stacked BATFET configuration the parallel charger's
+	 * current flows through the main charger's BATFET, so keep
+	 * the main charger's FCC at the votable result.
+	 */
+ if (chip->pl_batfet_mode == POWER_SUPPLY_PL_STACKED_BATFET) {
+ *master_ua = max(0, total_ua);
+ if (chip->main_fcc_max)
+ *master_ua = min(*master_ua,
+ chip->main_fcc_max + *slave_ua);
+ } else {
+ *master_ua = max(0, total_ua - *slave_ua);
+ if (chip->main_fcc_max)
+ *master_ua = min(*master_ua, chip->main_fcc_max);
+ }
+}
+
+static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
+ int parallel_fcc_ua)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ /* Read current FCC of main charger */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get main charger current fcc, rc=%d\n", rc);
+ return;
+ }
+ chip->main_fcc_ua = pval.intval;
+
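+	/*
+	 * Number of FCC_STEP_SIZE_UA steps (and the leftover residual)
+	 * needed to move from the present FCC to the requested target,
+	 * for the main and parallel chargers respectively.
+	 */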
+ chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ?
+ STEP_UP : STEP_DOWN;
+ chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) /
+ FCC_STEP_SIZE_UA);
+ chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) %
+ FCC_STEP_SIZE_UA;
+
+ chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
+ STEP_UP : STEP_DOWN;
+ chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
+ chip->slave_fcc_ua) / FCC_STEP_SIZE_UA);
+ chip->parallel_step_fcc_residual = (parallel_fcc_ua -
+ chip->slave_fcc_ua) % FCC_STEP_SIZE_UA;
+
+ if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
+ || chip->main_step_fcc_count || chip->main_step_fcc_residual)
+ chip->step_fcc = 1;
+
+ pr_debug("Main FCC Stepper parameters: main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d\n",
+ chip->main_step_fcc_dir, chip->main_step_fcc_count,
+ chip->main_step_fcc_residual);
+ pr_debug("Parallel FCC Stepper parameters: parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n",
+ chip->parallel_step_fcc_dir, chip->parallel_step_fcc_count,
+ chip->parallel_step_fcc_residual);
+}
+
+#define MINIMUM_PARALLEL_FCC_UA 500000
+#define PL_TAPER_WORK_DELAY_MS 500
+#define TAPER_RESIDUAL_PCT 90
+#define TAPER_REDUCTION_UA 200000
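+/*
+ * While the master charger reports taper charging, keep reducing the total
+ * FCC in TAPER_REDUCTION_UA steps until either taper charging ends, the
+ * parallel charger's share drops low enough to disable it, or parallel
+ * charging is disabled by another voter.
+ */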
+static void pl_taper_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work, struct pl_data,
+ pl_taper_work);
+ union power_supply_propval pval = {0, };
+ int rc;
+ int eff_fcc_ua;
+ int total_fcc_ua, master_fcc_ua, slave_fcc_ua = 0;
+
+ chip->taper_entry_fv = get_effective_result(chip->fv_votable);
+ chip->taper_work_running = true;
+ while (true) {
+ if (get_effective_result(chip->pl_disable_votable)) {
+ /*
+ * if parallel's FCC share is low, simply disable
+ * parallel with TAPER_END_VOTER
+ */
+ total_fcc_ua = get_effective_result_locked(
+ chip->fcc_votable);
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
+ if (slave_fcc_ua <= MINIMUM_PARALLEL_FCC_UA) {
+ pl_dbg(chip, PR_PARALLEL, "terminating: parallel's share is low\n");
+ vote(chip->pl_disable_votable, TAPER_END_VOTER,
+ true, 0);
+ } else {
+ pl_dbg(chip, PR_PARALLEL, "terminating: parallel disabled\n");
+ }
+ goto done;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ goto done;
+ }
+
+ chip->charge_type = pval.intval;
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+ eff_fcc_ua = get_effective_result(chip->fcc_votable);
+ if (eff_fcc_ua < 0) {
+ pr_err("Couldn't get fcc, exiting taper work\n");
+ goto done;
+ }
+ eff_fcc_ua = eff_fcc_ua - TAPER_REDUCTION_UA;
+ if (eff_fcc_ua < 0) {
+ pr_err("Can't reduce FCC any more\n");
+ goto done;
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing FCC to %dua\n",
+ eff_fcc_ua);
+ vote(chip->fcc_votable, TAPER_STEPPER_VOTER,
+ true, eff_fcc_ua);
+ } else {
+			/*
+			 * Due to the reduced float voltage in a JEITA
+			 * condition, taper charging can be initiated at a
+			 * lower FV. Once the JEITA condition is removed, the
+			 * FV readjusts itself. However, once taper charging
+			 * has started it does not exit until parallel
+			 * charging is disabled, so the FCC never scales back
+			 * to its original value and charging stays slow.
+			 * If the FV has increased compared to the FV at
+			 * which taper charging was initiated, exit taper
+			 * charging.
+			 */
+ if (get_effective_result(chip->fv_votable) >
+ chip->taper_entry_fv) {
+ pl_dbg(chip, PR_PARALLEL, "Float voltage increased. Exiting taper\n");
+ goto done;
+ } else {
+ pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+ }
+ }
+ /* wait for the charger state to deglitch after FCC change */
+ msleep(PL_TAPER_WORK_DELAY_MS);
+ }
+done:
+ chip->taper_work_running = false;
+ vote(chip->fcc_votable, TAPER_STEPPER_VOTER, false, 0);
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
+}
+
+static int pl_fcc_vote_callback(struct votable *votable, void *data,
+ int total_fcc_ua, const char *client)
+{
+ struct pl_data *chip = data;
+ int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+
+ if (total_fcc_ua < 0)
+ return 0;
+
+ if (!chip->main_psy)
+ return 0;
+
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
+
+ if (slave_fcc_ua > MINIMUM_PARALLEL_FCC_UA) {
+ vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
+ false, 0);
+ } else {
+ vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
+ true, 0);
+ }
+ }
+
+ rerun_election(chip->pl_disable_votable);
+
+ return 0;
+}
+
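+/*
+ * Gradually move the main and parallel FCC toward their targets in
+ * FCC_STEP_SIZE_UA increments, rescheduling itself every
+ * FCC_STEP_UPDATE_DELAY_MS until both targets are reached. If no charger
+ * is present, parallel charging is suspended and the main charger gets
+ * the full effective FCC.
+ */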
+static void fcc_stepper_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work, struct pl_data,
+ fcc_stepper_work.work);
+ union power_supply_propval pval = {0, };
+ int reschedule_ms = 0, rc = 0, charger_present = 0;
+ int main_fcc = chip->main_fcc_ua;
+ int parallel_fcc = chip->slave_fcc_ua;
+
+ /* Check whether USB is present or not */
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0)
+ pr_err("Couldn't get USB Present status, rc=%d\n", rc);
+
+ charger_present = pval.intval;
+
+	/* Check whether DC charger is present or not */
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+ if (chip->dc_psy) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0)
+ pr_err("Couldn't get DC Present status, rc=%d\n", rc);
+
+ charger_present |= pval.intval;
+ }
+
+ /*
+ * If USB is not present, then set parallel FCC to min value and
+ * main FCC to the effective value of FCC votable and exit.
+ */
+ if (!charger_present) {
+ /* Disable parallel */
+ parallel_fcc = 0;
+
+ if (chip->pl_psy) {
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ chip->pl_disable = true;
+ power_supply_changed(chip->pl_psy);
+ }
+
+ main_fcc = get_effective_result_locked(chip->fcc_votable);
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+ goto out;
+ }
+
+ goto stepper_exit;
+ }
+
+ if (chip->main_step_fcc_count) {
+ main_fcc += (FCC_STEP_SIZE_UA * chip->main_step_fcc_dir);
+ chip->main_step_fcc_count--;
+ reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+ } else if (chip->main_step_fcc_residual) {
+ main_fcc += chip->main_step_fcc_residual;
+ chip->main_step_fcc_residual = 0;
+ }
+
+ if (chip->parallel_step_fcc_count) {
+ parallel_fcc += (FCC_STEP_SIZE_UA *
+ chip->parallel_step_fcc_dir);
+ chip->parallel_step_fcc_count--;
+ reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+ } else if (chip->parallel_step_fcc_residual) {
+ parallel_fcc += chip->parallel_step_fcc_residual;
+ chip->parallel_step_fcc_residual = 0;
+ }
+
+ if (parallel_fcc < chip->slave_fcc_ua) {
+ /* Set parallel FCC */
+ if (chip->pl_psy && !chip->pl_disable) {
+ if (parallel_fcc < MINIMUM_PARALLEL_FCC_UA) {
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (IS_USBIN(chip->pl_mode))
+ split_settled(chip);
+
+ parallel_fcc = 0;
+ chip->parallel_step_fcc_count = 0;
+ chip->parallel_step_fcc_residual = 0;
+ chip->total_settled_ua = 0;
+ chip->pl_settled_ua = 0;
+ chip->pl_disable = true;
+ power_supply_changed(chip->pl_psy);
+ } else {
+ /* Set Parallel FCC */
+ pval.intval = parallel_fcc;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+ rc);
+ goto out;
+ }
+ }
+ }
+
+ /* Set main FCC */
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ /* Set main FCC */
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Set parallel FCC */
+ if (chip->pl_psy) {
+ pval.intval = parallel_fcc;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /*
+ * Enable parallel charger only if it was disabled
+ * earlier and configured slave fcc is greater than or
+ * equal to minimum parallel FCC value.
+ */
+ if (chip->pl_disable && parallel_fcc
+ >= MINIMUM_PARALLEL_FCC_UA) {
+ pval.intval = 0;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (IS_USBIN(chip->pl_mode))
+ split_settled(chip);
+
+ chip->pl_disable = false;
+ power_supply_changed(chip->pl_psy);
+ }
+ }
+ }
+
+stepper_exit:
+ chip->main_fcc_ua = main_fcc;
+ chip->slave_fcc_ua = parallel_fcc;
+
+ if (reschedule_ms) {
+ schedule_delayed_work(&chip->fcc_stepper_work,
+ msecs_to_jiffies(reschedule_ms));
+ pr_debug("Rescheduling FCC_STEPPER work\n");
+ return;
+ }
+out:
+ chip->step_fcc = 0;
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
+}
+
+#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
+static int pl_fv_vote_callback(struct votable *votable, void *data,
+ int fv_uv, const char *client)
+{
+ struct pl_data *chip = data;
+ union power_supply_propval pval = {0, };
+ int rc = 0;
+
+ if (fv_uv < 0)
+ return 0;
+
+ if (!chip->main_psy)
+ return 0;
+
+ pval.intval = fv_uv;
+
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main fv, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+ pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set float on parallel rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+#define ICL_STEP_UA 25000
+#define PL_DELAY_MS 3000
+static int usb_icl_vote_callback(struct votable *votable, void *data,
+ int icl_ua, const char *client)
+{
+ int rc;
+ struct pl_data *chip = data;
+ union power_supply_propval pval = {0, };
+ bool rerun_aicl = false;
+
+ if (!chip->main_psy)
+ return 0;
+
+ if (client == NULL)
+ icl_ua = INT_MAX;
+
+ /*
+ * Disable parallel for new ICL vote - the call to split_settled will
+ * ensure that all the input current limit gets assigned to the main
+ * charger.
+ */
+ vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+
+	/*
+	 * if (ICL < 1400 mA)
+	 *	disable the parallel charger using USBIN_I_VOTER
+	 * else
+	 *	do not re-enable here; rely on status_change_work
+	 *	(triggered on AICL completion, or scheduled from here)
+	 *	to vote on USBIN_I_VOTER based on the settled current.
+	 */
+ if (icl_ua <= 1400000)
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ msecs_to_jiffies(PL_DELAY_MS));
+
+ /* rerun AICL */
+ /* get the settled current */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return rc;
+ }
+
+ /* rerun AICL if new ICL is above settled ICL */
+ if (icl_ua > pval.intval)
+ rerun_aicl = true;
+
+ if (rerun_aicl && (chip->wa_flags & AICL_RERUN_WA_BIT)) {
+ /* set a lower ICL */
+ pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
+ power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+ }
+
+ /* set the effective ICL */
+ pval.intval = icl_ua;
+ power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
+
+ return 0;
+}
+
+static void pl_disable_forever_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, pl_disable_forever_work);
+
+ /* Disable Parallel charger forever */
+ vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+ /* Re-enable autonomous mode */
+ if (chip->hvdcp_hw_inov_dis_votable)
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
+}
+
+static void pl_awake_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, pl_awake_work.work);
+
+ vote(chip->pl_awake_votable, PL_VOTER, false, 0);
+}
+
+static bool is_main_available(struct pl_data *chip)
+{
+ if (chip->main_psy)
+ return true;
+
+ chip->main_psy = power_supply_get_by_name("main");
+
+ return !!chip->main_psy;
+}
+
+static bool is_batt_available(struct pl_data *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
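+/*
+ * Callback of the PL_DISABLE votable: enables or disables parallel charging
+ * and redistributes the effective FCC between the main and parallel
+ * chargers, either immediately or through the FCC stepper when it is
+ * enabled.
+ */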
+static int pl_disable_vote_callback(struct votable *votable,
+ void *data, int pl_disable, const char *client)
+{
+ struct pl_data *chip = data;
+ union power_supply_propval pval = {0, };
+ int master_fcc_ua = 0, total_fcc_ua = 0, slave_fcc_ua = 0;
+ int rc = 0;
+ bool disable = false;
+
+ if (!is_main_available(chip))
+ return -ENODEV;
+
+ if (!is_batt_available(chip))
+ return -ENODEV;
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy) {
+ pr_err("Couldn't get usb psy\n");
+ return -ENODEV;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read FCC step update status, rc=%d\n", rc);
+ return rc;
+ }
+ chip->fcc_stepper_enable = pval.intval;
+ pr_debug("FCC Stepper %s\n", pval.intval ? "enabled" : "disabled");
+
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_MAIN_FCC_MAX, &pval);
+ if (rc < 0) {
+ pl_dbg(chip, PR_PARALLEL,
+ "Couldn't read primary charger FCC upper limit, rc=%d\n",
+ rc);
+ } else if (pval.intval > 0) {
+ chip->main_fcc_max = pval.intval;
+ }
+
+ if (chip->fcc_stepper_enable) {
+ cancel_delayed_work_sync(&chip->fcc_stepper_work);
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
+ }
+
+ total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
+ /* keep system awake to talk to slave charger through i2c */
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ vote(chip->pl_awake_votable, PL_VOTER, true, 0);
+
+ rc = validate_parallel_icl(chip, &disable);
+ if (rc < 0)
+ return rc;
+
+ if (disable) {
+ pr_info("Parallel ICL is less than min ICL(%d), skipping parallel enable\n",
+ chip->pl_min_icl_ua);
+ return 0;
+ }
+
+ /* enable parallel charging */
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc == -ENODEV) {
+ /*
+ * -ENODEV is returned only if parallel chip
+ * is not present in the system.
+ * Disable parallel charger forever.
+ */
+ schedule_work(&chip->pl_disable_forever_work);
+ return rc;
+ }
+
+ rerun_election(chip->fv_votable);
+
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
+
+ if (chip->fcc_stepper_enable) {
+ get_fcc_stepper_params(chip, master_fcc_ua,
+ slave_fcc_ua);
+ if (chip->step_fcc) {
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+ true, 0);
+ schedule_delayed_work(&chip->fcc_stepper_work,
+ 0);
+ }
+ } else {
+ /*
+ * If there is an increase in slave share
+ * (Also handles parallel enable case)
+ * Set Main ICL then slave FCC
+ * else
+ * (Also handles parallel disable case)
+ * Set slave ICL then main FCC.
+ */
+ if (slave_fcc_ua > chip->slave_fcc_ua) {
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+ } else {
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /*
+ * Enable will be called with a valid pl_psy always. The
+ * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+ * is seen.
+ */
+ pval.intval = 0;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0)
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+
+ if (IS_USBIN(chip->pl_mode))
+ split_settled(chip);
+ }
+
+ /*
+ * we could have been enabled while in taper mode,
+ * start the taper work if so
+ */
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ } else {
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+ && !chip->taper_work_running) {
+ pl_dbg(chip, PR_PARALLEL,
+					"pl enabled in taper, scheduling work\n");
+ vote(chip->pl_awake_votable, TAPER_END_VOTER,
+ true, 0);
+ queue_work(system_long_wq,
+ &chip->pl_taper_work);
+ }
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+ master_fcc_ua, slave_fcc_ua,
+ (master_fcc_ua * 100) / total_fcc_ua,
+ (slave_fcc_ua * 100) / total_fcc_ua);
+ } else {
+ if (chip->main_fcc_max)
+ total_fcc_ua = min(total_fcc_ua,
+ chip->main_fcc_max);
+
+ if (!chip->fcc_stepper_enable) {
+ if (IS_USBIN(chip->pl_mode))
+ split_settled(chip);
+
+ /* pl_psy may be NULL while in the disable branch */
+ if (chip->pl_psy) {
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0)
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ }
+
+ /* main psy gets all share */
+ pval.intval = total_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* reset parallel FCC */
+ chip->slave_fcc_ua = 0;
+ chip->total_settled_ua = 0;
+ chip->pl_settled_ua = 0;
+ } else {
+ get_fcc_stepper_params(chip, total_fcc_ua, 0);
+ if (chip->step_fcc) {
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+ true, 0);
+ schedule_delayed_work(&chip->fcc_stepper_work,
+ 0);
+ }
+ }
+
+ rerun_election(chip->fv_votable);
+
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ schedule_delayed_work(&chip->pl_awake_work,
+ msecs_to_jiffies(5000));
+ }
+
+ /* notify parallel state change */
+ if (chip->pl_psy && (chip->pl_disable != pl_disable)
+ && !chip->fcc_stepper_enable) {
+ power_supply_changed(chip->pl_psy);
+ chip->pl_disable = (bool)pl_disable;
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
+ pl_disable ? "disabled" : "enabled");
+
+ return 0;
+}
+
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+ void *data, int pl_enable, const char *client)
+{
+ struct pl_data *chip = data;
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+ return 0;
+}
+
+static int pl_awake_vote_callback(struct votable *votable,
+ void *data, int awake, const char *client)
+{
+ struct pl_data *chip = data;
+
+ if (awake)
+ __pm_stay_awake(chip->pl_ws);
+ else
+ __pm_relax(chip->pl_ws);
+
+ pr_debug("client: %s awake: %d\n", client, awake);
+ return 0;
+}
+
+static bool is_parallel_available(struct pl_data *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (chip->pl_psy)
+ return true;
+
+ chip->pl_psy = power_supply_get_by_name("parallel");
+ if (!chip->pl_psy)
+ return false;
+
+ vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
+
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get parallel mode from parallel rc=%d\n",
+ rc);
+ return false;
+ }
+ /*
+ * Note that pl_mode will be updated to anything other than a _NONE
+ * only after pl_psy is found. IOW pl_mode != _NONE implies that
+ * pl_psy is present and valid.
+ */
+ chip->pl_mode = pval.intval;
+
+	/* Disable autonomous voltage increments for USBIN-USBIN */
+ if (IS_USBIN(chip->pl_mode)
+ && (chip->wa_flags & FORCE_INOV_DISABLE_BIT)) {
+ if (!chip->hvdcp_hw_inov_dis_votable)
+ chip->hvdcp_hw_inov_dis_votable =
+ find_votable("HVDCP_HW_INOV_DIS");
+ if (chip->hvdcp_hw_inov_dis_votable)
+ /* Read current pulse count */
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+ true, 0);
+ else
+ return false;
+ }
+
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get parallel batfet mode rc=%d\n",
+ rc);
+ return false;
+ }
+ chip->pl_batfet_mode = pval.intval;
+
+ pval.intval = 0;
+ power_supply_get_property(chip->pl_psy, POWER_SUPPLY_PROP_MIN_ICL,
+ &pval);
+ chip->pl_min_icl_ua = pval.intval;
+
+ chip->pl_fcc_max = INT_MAX;
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_PARALLEL_FCC_MAX, &pval);
+ if (!rc)
+ chip->pl_fcc_max = pval.intval;
+
+ return true;
+}
+
+static void handle_main_charge_type(struct pl_data *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ return;
+ }
+
+	/* not in fast or taper charge state; disable parallel */
+ if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
+ && (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+ vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+ chip->charge_type = pval.intval;
+ return;
+ }
+
+ /* handle taper charge entry */
+ if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
+ && (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+ chip->charge_type = pval.intval;
+ if (!chip->taper_work_running) {
+ pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+ queue_work(system_long_wq, &chip->pl_taper_work);
+ }
+ return;
+ }
+
+ /* handle fast/taper charge entry */
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+ || pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ /*
+ * Undo parallel charging termination if entered taper in
+ * reduced float voltage condition due to jeita mitigation.
+ */
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST &&
+ (chip->taper_entry_fv <
+ get_effective_result(chip->fv_votable))) {
+ vote(chip->pl_disable_votable, TAPER_END_VOTER,
+ false, 0);
+ }
+ pl_dbg(chip, PR_PARALLEL, "chg_state enabling parallel\n");
+ vote(chip->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+ chip->charge_type = pval.intval;
+ return;
+ }
+
+ /* remember the new state only if it isn't any of the above */
+ chip->charge_type = pval.intval;
+}
+
+#define MIN_ICL_CHANGE_DELTA_UA 300000
+static void handle_settled_icl_change(struct pl_data *chip)
+{
+ union power_supply_propval pval = {0, };
+ int new_total_settled_ua;
+ int rc;
+ int main_settled_ua;
+ int main_limited;
+ int total_current_ua;
+ bool disable = false;
+
+ total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+ /*
+ * call aicl split only when USBIN_USBIN and enabled
+ * and if aicl changed
+ */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_settled_ua = pval.intval;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return;
+ }
+ main_limited = pval.intval;
+
+ if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+ || (main_settled_ua == 0)
+ || ((total_current_ua >= 0) &&
+ (total_current_ua <= 1400000)))
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ else
+ vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
+ rerun_election(chip->fcc_votable);
+
+ if (IS_USBIN(chip->pl_mode)) {
+ /*
+ * call aicl split only when USBIN_USBIN and enabled
+ * and if settled current has changed by more than 300mA
+ */
+
+ new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
+ pl_dbg(chip, PR_PARALLEL,
+ "total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
+ chip->total_settled_ua, pval.intval,
+ new_total_settled_ua);
+
+ /* If ICL change is small skip splitting */
+ if (abs(new_total_settled_ua - chip->total_settled_ua)
+ > MIN_ICL_CHANGE_DELTA_UA) {
+ rc = validate_parallel_icl(chip, &disable);
+ if (rc < 0)
+ return;
+
+ vote(chip->pl_disable_votable, ICL_LIMIT_VOTER,
+ disable, 0);
+ if (!get_effective_result_locked(
+ chip->pl_disable_votable))
+ split_settled(chip);
+ }
+ }
+}
+
+static void handle_parallel_in_taper(struct pl_data *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (get_effective_result_locked(chip->pl_disable_votable))
+ return;
+
+ if (!chip->pl_psy)
+ return;
+
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get pl charge type rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * if parallel is seen in taper mode ever, that is an anomaly and
+ * we disable parallel charger
+ */
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+ vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+ true, 0);
+ return;
+ }
+}
+
+static void handle_usb_change(struct pl_data *chip)
+{
+ int rc;
+ union power_supply_propval pval = {0, };
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy) {
+ pr_err("Couldn't get usbpsy\n");
+ return;
+ }
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get present from USB rc=%d\n", rc);
+ return;
+ }
+
+ if (!pval.intval) {
+ /* USB removed: remove all stale votes */
+ vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+ vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+ false, 0);
+ vote(chip->pl_disable_votable, ICL_LIMIT_VOTER, false, 0);
+ }
+}
+
+static void status_change_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, status_change_work.work);
+
+ if (!chip->main_psy && is_main_available(chip)) {
+ /*
+ * re-run election for FCC/FV/ICL once main_psy
+ * is available to ensure all votes are reflected
+ * on hardware
+ */
+ rerun_election(chip->usb_icl_votable);
+ rerun_election(chip->fcc_votable);
+ rerun_election(chip->fv_votable);
+ }
+
+ if (!chip->main_psy)
+ return;
+
+ if (!is_batt_available(chip))
+ return;
+
+ is_parallel_available(chip);
+
+ handle_usb_change(chip);
+ handle_main_charge_type(chip);
+ handle_settled_icl_change(chip);
+ handle_parallel_in_taper(chip);
+}
+
+static int pl_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct pl_data *chip = container_of(nb, struct pl_data, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if ((strcmp(psy->desc->name, "parallel") == 0)
+ || (strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "main") == 0))
+ schedule_delayed_work(&chip->status_change_work, 0);
+
+ return NOTIFY_OK;
+}
+
+static int pl_register_notifier(struct pl_data *chip)
+{
+ int rc;
+
+ chip->nb.notifier_call = pl_notifier_call;
+ rc = power_supply_reg_notifier(&chip->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pl_determine_initial_status(struct pl_data *chip)
+{
+ status_change_work(&chip->status_change_work.work);
+ return 0;
+}
+
+static void pl_config_init(struct pl_data *chip, int smb_version)
+{
+ switch (smb_version) {
+ case PMI8998_SUBTYPE:
+ case PM660_SUBTYPE:
+ chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
+ break;
+ case PMI632_SUBTYPE:
+ break;
+ default:
+ break;
+ }
+}
+
+static void qcom_batt_create_debugfs(struct pl_data *chip)
+{
+ struct dentry *entry;
+
+ chip->dfs_root = debugfs_create_dir("battery", NULL);
+ if (IS_ERR_OR_NULL(chip->dfs_root)) {
+ pr_err("Couldn't create battery debugfs rc=%ld\n",
+ (long)chip->dfs_root);
+ return;
+ }
+
+ entry = debugfs_create_u32("debug_mask", 0600, chip->dfs_root,
+ &debug_mask);
+ if (IS_ERR_OR_NULL(entry))
+ pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+ (long)entry);
+}
+
+#define DEFAULT_RESTRICTED_CURRENT_UA 1000000
+int qcom_batt_init(int smb_version)
+{
+ struct pl_data *chip;
+ int rc = 0;
+
+ /* initialize just once */
+ if (the_chip) {
+ pr_err("was initialized earlier. Failing now\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ qcom_batt_create_debugfs(chip);
+
+ chip->slave_pct = 50;
+ pl_config_init(chip, smb_version);
+ chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
+
+ chip->pl_ws = wakeup_source_register("qcom-battery");
+ if (!chip->pl_ws)
+ goto cleanup;
+
+ chip->fcc_votable = create_votable("FCC", VOTE_MIN,
+ pl_fcc_vote_callback,
+ chip);
+ if (IS_ERR(chip->fcc_votable)) {
+ rc = PTR_ERR(chip->fcc_votable);
+ chip->fcc_votable = NULL;
+ goto release_wakeup_source;
+ }
+
+ chip->fv_votable = create_votable("FV", VOTE_MIN,
+ pl_fv_vote_callback,
+ chip);
+ if (IS_ERR(chip->fv_votable)) {
+ rc = PTR_ERR(chip->fv_votable);
+ chip->fv_votable = NULL;
+ goto destroy_votable;
+ }
+
+ chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+ usb_icl_vote_callback,
+ chip);
+ if (IS_ERR(chip->usb_icl_votable)) {
+ rc = PTR_ERR(chip->usb_icl_votable);
+ chip->usb_icl_votable = NULL;
+ goto destroy_votable;
+ }
+
+ chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
+ pl_disable_vote_callback,
+ chip);
+ if (IS_ERR(chip->pl_disable_votable)) {
+ rc = PTR_ERR(chip->pl_disable_votable);
+ chip->pl_disable_votable = NULL;
+ goto destroy_votable;
+ }
+ vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+ vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+ vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, true, 0);
+
+ chip->pl_awake_votable = create_votable("PL_AWAKE", VOTE_SET_ANY,
+ pl_awake_vote_callback,
+ chip);
+ if (IS_ERR(chip->pl_awake_votable)) {
+ rc = PTR_ERR(chip->pl_awake_votable);
+ chip->pl_awake_votable = NULL;
+ goto destroy_votable;
+ }
+
+ chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ pl_enable_indirect_vote_callback,
+ chip);
+ if (IS_ERR(chip->pl_enable_votable_indirect)) {
+ rc = PTR_ERR(chip->pl_enable_votable_indirect);
+ chip->pl_enable_votable_indirect = NULL;
+ goto destroy_votable;
+ }
+
+ vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->pl_taper_work, pl_taper_work);
+ INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+ INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
+ INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work);
+
+ rc = pl_register_notifier(chip);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ goto unreg_notifier;
+ }
+
+ rc = pl_determine_initial_status(chip);
+ if (rc < 0) {
+ pr_err("Couldn't determine initial status rc=%d\n", rc);
+ goto unreg_notifier;
+ }
+
+ chip->pl_disable = true;
+ chip->qcom_batt_class.name = "qcom-battery",
+ chip->qcom_batt_class.owner = THIS_MODULE,
+ chip->qcom_batt_class.class_groups = batt_class_groups;
+
+ rc = class_register(&chip->qcom_batt_class);
+ if (rc < 0) {
+ pr_err("couldn't register pl_data sysfs class rc = %d\n", rc);
+ goto unreg_notifier;
+ }
+
+ the_chip = chip;
+
+ return 0;
+
+unreg_notifier:
+ power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+ destroy_votable(chip->pl_enable_votable_indirect);
+ destroy_votable(chip->pl_awake_votable);
+ destroy_votable(chip->pl_disable_votable);
+ destroy_votable(chip->fv_votable);
+ destroy_votable(chip->fcc_votable);
+ destroy_votable(chip->usb_icl_votable);
+release_wakeup_source:
+ wakeup_source_unregister(chip->pl_ws);
+cleanup:
+ kfree(chip);
+ return rc;
+}
+
+void qcom_batt_deinit(void)
+{
+ struct pl_data *chip = the_chip;
+
+ if (chip == NULL)
+ return;
+
+ cancel_delayed_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->pl_taper_work);
+ cancel_work_sync(&chip->pl_disable_forever_work);
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ cancel_delayed_work_sync(&chip->fcc_stepper_work);
+
+ power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->pl_enable_votable_indirect);
+ destroy_votable(chip->pl_awake_votable);
+ destroy_votable(chip->pl_disable_votable);
+ destroy_votable(chip->fv_votable);
+ destroy_votable(chip->fcc_votable);
+ wakeup_source_unregister(chip->pl_ws);
+ the_chip = NULL;
+ kfree(chip);
+}
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
new file mode 100644
index 0000000..53a54e8
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __BATTERY_H
+#define __BATTERY_H
+int qcom_batt_init(int smb_version);
+void qcom_batt_deinit(void);
+#endif /* __BATTERY_H */
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
new file mode 100644
index 0000000..8b7f0c8
--- /dev/null
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/pmic-voter.h>
+
+#define NUM_MAX_CLIENTS 16
+#define DEBUG_FORCE_CLIENT "DEBUG_FORCE_CLIENT"
+
+static DEFINE_SPINLOCK(votable_list_slock);
+static LIST_HEAD(votable_list);
+
+static struct dentry *debug_root;
+
+struct client_vote {
+ bool enabled;
+ int value;
+};
+
+struct votable {
+ const char *name;
+ struct list_head list;
+ struct client_vote votes[NUM_MAX_CLIENTS];
+ int num_clients;
+ int type;
+ int effective_client_id;
+ int effective_result;
+ struct mutex vote_lock;
+ void *data;
+ int (*callback)(struct votable *votable,
+ void *data,
+ int effective_result,
+ const char *effective_client);
+ char *client_strs[NUM_MAX_CLIENTS];
+ bool voted_on;
+ struct dentry *root;
+ struct dentry *status_ent;
+ u32 force_val;
+ struct dentry *force_val_ent;
+ bool force_active;
+ struct dentry *force_active_ent;
+};
+
+/**
+ * vote_set_any()
+ * @votable: votable object
+ * @client_id: client number of the latest voter
+ * @eff_res: sets 0 or 1 based on the voting
+ * @eff_id: Always returns the client_id argument
+ *
+ * Note that for a SET_ANY voter, the value is always the same as enabled.
+ * There is no notion of a voter abstaining from the election, so the
+ * effective_id is never invalid during an election.
+ *
+ * Context:
+ * Must be called with the votable->vote_lock held
+ */
+static void vote_set_any(struct votable *votable, int client_id,
+ int *eff_res, int *eff_id)
+{
+ int i;
+
+ *eff_res = 0;
+
+ for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+ *eff_res |= votable->votes[i].enabled;
+
+ *eff_id = client_id;
+}
+
+/**
+ * vote_min() -
+ * @votable: votable object
+ * @client_id: client number of the latest voter
+ * @eff_res: sets this to the min. of all the values amongst enabled voters.
+ * If there is no enabled client, this is set to INT_MAX
+ * @eff_id: sets this to the client id that has the min value amongst all
+ * the enabled clients. If there is no enabled client, sets this
+ * to -EINVAL
+ *
+ * Context:
+ * Must be called with the votable->vote_lock held
+ */
+static void vote_min(struct votable *votable, int client_id,
+ int *eff_res, int *eff_id)
+{
+ int i;
+
+ *eff_res = INT_MAX;
+ *eff_id = -EINVAL;
+ for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+ if (votable->votes[i].enabled
+ && *eff_res > votable->votes[i].value) {
+ *eff_res = votable->votes[i].value;
+ *eff_id = i;
+ }
+ }
+ if (*eff_id == -EINVAL)
+ *eff_res = -EINVAL;
+}
+
+/**
+ * vote_max() -
+ * @votable: votable object
+ * @client_id: client number of the latest voter
+ * @eff_res: sets this to the max. of all the values amongst enabled voters.
+ * If there is no enabled client, this is set to -EINVAL
+ * @eff_id: sets this to the client id that has the max value amongst all
+ * the enabled clients. If there is no enabled client, sets this to
+ * -EINVAL
+ *
+ * Context:
+ * Must be called with the votable->vote_lock held
+ */
+static void vote_max(struct votable *votable, int client_id,
+ int *eff_res, int *eff_id)
+{
+ int i;
+
+ *eff_res = INT_MIN;
+ *eff_id = -EINVAL;
+ for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+ if (votable->votes[i].enabled &&
+ *eff_res < votable->votes[i].value) {
+ *eff_res = votable->votes[i].value;
+ *eff_id = i;
+ }
+ }
+ if (*eff_id == -EINVAL)
+ *eff_res = -EINVAL;
+}
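+
+/*
+ * Illustrative sketch (not part of the driver; values are hypothetical):
+ * with a VOTE_MIN votable whose enabled clients voted 3000000 and 2000000,
+ * vote_min() reports *eff_res = 2000000 and *eff_id = the 2000000 client;
+ * if that client later disables its vote, the result falls back to 3000000.
+ * vote_max() resolves the same election the other way around.
+ */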
+
+static int get_client_id(struct votable *votable, const char *client_str)
+{
+ int i;
+
+ for (i = 0; i < votable->num_clients; i++) {
+ if (votable->client_strs[i]
+ && (strcmp(votable->client_strs[i], client_str) == 0))
+ return i;
+ }
+
+ /* new client */
+ for (i = 0; i < votable->num_clients; i++) {
+ if (!votable->client_strs[i]) {
+ votable->client_strs[i]
+ = kstrdup(client_str, GFP_KERNEL);
+ if (!votable->client_strs[i])
+ return -ENOMEM;
+ return i;
+ }
+ }
+ return -EINVAL;
+}
+
+static char *get_client_str(struct votable *votable, int client_id)
+{
+ if (client_id == -EINVAL)
+ return NULL;
+
+ return votable->client_strs[client_id];
+}
+
+void lock_votable(struct votable *votable)
+{
+ mutex_lock(&votable->vote_lock);
+}
+
+void unlock_votable(struct votable *votable)
+{
+ mutex_unlock(&votable->vote_lock);
+}
+
+/**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ * The unlocked and locked variants of getting whether a client's
+ * vote is enabled.
+ * @votable: the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ * True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str)
+{
+ int client_id = get_client_id(votable, client_str);
+
+ if (client_id < 0)
+ return false;
+
+ return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+ bool enabled;
+
+ lock_votable(votable);
+ enabled = is_client_vote_enabled_locked(votable, client_str);
+ unlock_votable(votable);
+ return enabled;
+}
+
+/**
+ * get_client_vote() -
+ * get_client_vote_locked() -
+ * The unlocked and locked variants of getting a client's voted
+ * value.
+ * @votable: the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ * The value the client voted for. -EINVAL is returned if the client
+ * is not enabled or the client is not found.
+ */
+int get_client_vote_locked(struct votable *votable, const char *client_str)
+{
+ int client_id = get_client_id(votable, client_str);
+
+ if (client_id < 0)
+ return -EINVAL;
+
+ if ((votable->type != VOTE_SET_ANY)
+ && !votable->votes[client_id].enabled)
+ return -EINVAL;
+
+ return votable->votes[client_id].value;
+}
+
+int get_client_vote(struct votable *votable, const char *client_str)
+{
+ int value;
+
+ lock_votable(votable);
+ value = get_client_vote_locked(votable, client_str);
+ unlock_votable(votable);
+ return value;
+}
+
+/**
+ * get_effective_result() -
+ * get_effective_result_locked() -
+ * The unlocked and locked variants of getting the effective value
+ * amongst all the enabled voters.
+ *
+ * @votable: the votable object
+ *
+ * Returns:
+ * The effective result.
+ * For MIN and MAX votables, returns -EINVAL when the votable
+ * object has been created but no clients have cast their votes, or
+ * when the last enabled client disables its vote.
+ * For a SET_ANY votable it returns 0 when no clients have cast their
+ * votes, because for SET_ANY there is no concept of abstaining from the
+ * election; the votes of all clients of a SET_ANY votable default to false.
+ */
+int get_effective_result_locked(struct votable *votable)
+{
+ if (votable->force_active)
+ return votable->force_val;
+
+ return votable->effective_result;
+}
+
+int get_effective_result(struct votable *votable)
+{
+ int value;
+
+ lock_votable(votable);
+ value = get_effective_result_locked(votable);
+ unlock_votable(votable);
+ return value;
+}
+
+/**
+ * get_effective_client() -
+ * get_effective_client_locked() -
+ * The unlocked and locked variants of getting the effective client
+ * amongst all the enabled voters.
+ *
+ * @votable: the votable object
+ *
+ * Returns:
+ * The effective client.
+ * For MIN and MAX votables, returns NULL when the votable
+ * object has been created but no clients have cast their votes, or
+ * when the last enabled client disables its vote.
+ * For a SET_ANY votable it also returns NULL when no clients have cast
+ * their votes. But since SET_ANY has no concept of abstaining from the
+ * election, once votes are cast the client that caused the result to
+ * change (or the only client that voted) is returned.
+ */
+const char *get_effective_client_locked(struct votable *votable)
+{
+ if (votable->force_active)
+ return DEBUG_FORCE_CLIENT;
+
+ return get_client_str(votable, votable->effective_client_id);
+}
+
+const char *get_effective_client(struct votable *votable)
+{
+ const char *client_str;
+
+ lock_votable(votable);
+ client_str = get_effective_client_locked(votable);
+ unlock_votable(votable);
+ return client_str;
+}
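+
+/*
+ * Illustrative sketch (not part of the driver; the votable name below is
+ * hypothetical): querying the outcome of an election on a MIN/MAX votable.
+ *
+ *	struct votable *fcc = find_votable("FCC");
+ *	int ua = get_effective_result(fcc);		// -EINVAL if nobody voted
+ *	const char *who = get_effective_client(fcc);	// NULL if nobody voted
+ */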
+
+/**
+ * vote() -
+ *
+ * @votable: the votable object
+ * @client_str: the voting client
+ * @enabled:	This provides a means for the client to exclude itself from
+ *		the election. The client's val (the next argument) is
+ *		considered only when the client has enabled its participation.
+ *		Note that this takes a different meaning for the SET_ANY type,
+ *		as there is no concept of abstaining from participation there:
+ *		enabled is treated as the boolean value the client is voting.
+ * @val:	The vote value. This is ignored for SET_ANY votable types.
+ *		For MIN and MAX votable types this value is used as the
+ *		client's vote value when enabled is true, and is ignored when
+ *		enabled is false.
+ *
+ * The callback is called only when there is a change in the election results or
+ * if it is the first time someone is voting.
+ *
+ * Returns:
+ * The return from the callback when present and needs to be called
+ * or zero.
+ */
+int vote(struct votable *votable, const char *client_str, bool enabled, int val)
+{
+ int effective_id = -EINVAL;
+ int effective_result;
+ int client_id;
+ int rc = 0;
+ bool similar_vote = false;
+
+ lock_votable(votable);
+
+ client_id = get_client_id(votable, client_str);
+ if (client_id < 0) {
+ rc = client_id;
+ goto out;
+ }
+
+ /*
+ * For SET_ANY the val is ignored; set it to enabled so that the
+ * election still works on value regardless of the votable type.
+ */
+ if (votable->type == VOTE_SET_ANY)
+ val = enabled;
+
+ if ((votable->votes[client_id].enabled == enabled) &&
+ (votable->votes[client_id].value == val)) {
+ pr_debug("%s: %s,%d same vote %s of val=%d\n",
+ votable->name,
+ client_str, client_id,
+ enabled ? "on" : "off",
+ val);
+ similar_vote = true;
+ }
+
+ votable->votes[client_id].enabled = enabled;
+ votable->votes[client_id].value = val;
+
+ if (similar_vote && votable->voted_on) {
+ pr_debug("%s: %s,%d Ignoring similar vote %s of val=%d\n",
+ votable->name,
+ client_str, client_id, enabled ? "on" : "off", val);
+ goto out;
+ }
+
+ pr_debug("%s: %s,%d voting %s of val=%d\n",
+ votable->name,
+ client_str, client_id, enabled ? "on" : "off", val);
+ switch (votable->type) {
+ case VOTE_MIN:
+ vote_min(votable, client_id, &effective_result, &effective_id);
+ break;
+ case VOTE_MAX:
+ vote_max(votable, client_id, &effective_result, &effective_id);
+ break;
+ case VOTE_SET_ANY:
+ vote_set_any(votable, client_id,
+ &effective_result, &effective_id);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Note that the callback is called with a NULL string and -EINVAL
+ * result when there are no enabled votes
+ */
+ if (!votable->voted_on
+ || (effective_result != votable->effective_result)) {
+ votable->effective_client_id = effective_id;
+ votable->effective_result = effective_result;
+ pr_debug("%s: effective vote is now %d voted by %s,%d\n",
+ votable->name, effective_result,
+ get_client_str(votable, effective_id),
+ effective_id);
+ if (votable->callback && !votable->force_active)
+ rc = votable->callback(votable, votable->data,
+ effective_result,
+ get_client_str(votable, effective_id));
+ }
+
+ votable->voted_on = true;
+out:
+ unlock_votable(votable);
+ return rc;
+}
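+
+/*
+ * Illustrative sketch (not part of the driver; the client name below is
+ * hypothetical): a thermal client limiting FCC on a VOTE_MIN votable and
+ * later withdrawing its vote so other clients' votes take effect again.
+ *
+ *	vote(fcc_votable, "THERMAL_CLIENT_VOTER", true, 2000000);
+ *	...
+ *	vote(fcc_votable, "THERMAL_CLIENT_VOTER", false, 0);
+ */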
+
+int rerun_election(struct votable *votable)
+{
+ int rc = 0;
+ int effective_result;
+
+ lock_votable(votable);
+ effective_result = get_effective_result_locked(votable);
+ if (votable->callback)
+ rc = votable->callback(votable,
+ votable->data,
+ effective_result,
+ get_client_str(votable, votable->effective_client_id));
+ unlock_votable(votable);
+ return rc;
+}
+
+struct votable *find_votable(const char *name)
+{
+ unsigned long flags;
+ struct votable *v;
+ bool found = false;
+
+ spin_lock_irqsave(&votable_list_slock, flags);
+ if (list_empty(&votable_list))
+ goto out;
+
+ list_for_each_entry(v, &votable_list, list) {
+ if (strcmp(v->name, name) == 0) {
+ found = true;
+ break;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&votable_list_slock, flags);
+
+ if (found)
+ return v;
+ else
+ return NULL;
+}
+
+static int force_active_get(void *data, u64 *val)
+{
+ struct votable *votable = data;
+
+ *val = votable->force_active;
+
+ return 0;
+}
+
+static int force_active_set(void *data, u64 val)
+{
+ struct votable *votable = data;
+ int rc = 0;
+
+ lock_votable(votable);
+ votable->force_active = !!val;
+
+ if (!votable->callback)
+ goto out;
+
+ if (votable->force_active) {
+ rc = votable->callback(votable, votable->data,
+ votable->force_val,
+ DEBUG_FORCE_CLIENT);
+ } else {
+ rc = votable->callback(votable, votable->data,
+ votable->effective_result,
+ get_client_str(votable, votable->effective_client_id));
+ }
+out:
+ unlock_votable(votable);
+ return rc;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(votable_force_ops, force_active_get, force_active_set,
+ "%lld\n");
+
+static int show_votable_clients(struct seq_file *m, void *data)
+{
+ struct votable *votable = m->private;
+ int i;
+ char *type_str = "Unkonwn";
+ const char *effective_client_str;
+
+ lock_votable(votable);
+
+ for (i = 0; i < votable->num_clients; i++) {
+ if (votable->client_strs[i]) {
+ seq_printf(m, "%s: %s:\t\t\ten=%d v=%d\n",
+ votable->name,
+ votable->client_strs[i],
+ votable->votes[i].enabled,
+ votable->votes[i].value);
+ }
+ }
+
+ switch (votable->type) {
+ case VOTE_MIN:
+ type_str = "Min";
+ break;
+ case VOTE_MAX:
+ type_str = "Max";
+ break;
+ case VOTE_SET_ANY:
+ type_str = "Set_any";
+ break;
+ }
+
+ effective_client_str = get_effective_client_locked(votable);
+ seq_printf(m, "%s: effective=%s type=%s v=%d\n",
+ votable->name,
+ effective_client_str ? effective_client_str : "none",
+ type_str,
+ get_effective_result_locked(votable));
+ unlock_votable(votable);
+
+ return 0;
+}
+
+static int votable_status_open(struct inode *inode, struct file *file)
+{
+ struct votable *votable = inode->i_private;
+
+ return single_open(file, show_votable_clients, votable);
+}
+
+static const struct file_operations votable_status_ops = {
+ .owner = THIS_MODULE,
+ .open = votable_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+struct votable *create_votable(const char *name,
+ int votable_type,
+ int (*callback)(struct votable *votable,
+ void *data,
+ int effective_result,
+ const char *effective_client),
+ void *data)
+{
+ struct votable *votable;
+ unsigned long flags;
+
+ votable = find_votable(name);
+ if (votable)
+ return ERR_PTR(-EEXIST);
+
+ if (debug_root == NULL) {
+ debug_root = debugfs_create_dir("pmic-votable", NULL);
+ if (!debug_root) {
+ pr_err("Couldn't create debug dir\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if (votable_type >= NUM_VOTABLE_TYPES) {
+ pr_err("Invalid votable_type specified for voter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ votable = kzalloc(sizeof(struct votable), GFP_KERNEL);
+ if (!votable)
+ return ERR_PTR(-ENOMEM);
+
+ votable->name = kstrdup(name, GFP_KERNEL);
+ if (!votable->name) {
+ kfree(votable);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ votable->num_clients = NUM_MAX_CLIENTS;
+ votable->callback = callback;
+ votable->type = votable_type;
+ votable->data = data;
+ mutex_init(&votable->vote_lock);
+
+ /*
+ * Because effective_result and client states are invalid
+ * before the first vote, initialize them to -EINVAL
+ */
+ votable->effective_result = -EINVAL;
+ if (votable->type == VOTE_SET_ANY)
+ votable->effective_result = 0;
+ votable->effective_client_id = -EINVAL;
+
+ spin_lock_irqsave(&votable_list_slock, flags);
+ list_add(&votable->list, &votable_list);
+ spin_unlock_irqrestore(&votable_list_slock, flags);
+
+ votable->root = debugfs_create_dir(name, debug_root);
+ if (!votable->root) {
+ pr_err("Couldn't create debug dir %s\n", name);
+ kfree(votable->name);
+ kfree(votable);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ votable->status_ent = debugfs_create_file("status", S_IFREG | 0444,
+ votable->root, votable,
+ &votable_status_ops);
+ if (!votable->status_ent) {
+ pr_err("Couldn't create status dbg file for %s\n", name);
+ debugfs_remove_recursive(votable->root);
+ kfree(votable->name);
+ kfree(votable);
+ return ERR_PTR(-EEXIST);
+ }
+
+ votable->force_val_ent = debugfs_create_u32("force_val",
+ S_IFREG | 0644,
+ votable->root,
+ &(votable->force_val));
+
+ if (!votable->force_val_ent) {
+ pr_err("Couldn't create force_val dbg file for %s\n", name);
+ debugfs_remove_recursive(votable->root);
+ kfree(votable->name);
+ kfree(votable);
+ return ERR_PTR(-EEXIST);
+ }
+
+ votable->force_active_ent = debugfs_create_file("force_active",
+ S_IFREG | 0444,
+ votable->root, votable,
+ &votable_force_ops);
+ if (!votable->force_active_ent) {
+ pr_err("Couldn't create force_active dbg file for %s\n", name);
+ debugfs_remove_recursive(votable->root);
+ kfree(votable->name);
+ kfree(votable);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return votable;
+}
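+
+/*
+ * Illustrative sketch (not part of the driver; the votable name and callback
+ * below are hypothetical): creating a MIN votable and casting an initial vote.
+ *
+ *	votable = create_votable("FCC", VOTE_MIN, fcc_vote_callback, chip);
+ *	if (IS_ERR(votable))
+ *		return PTR_ERR(votable);
+ *	vote(votable, "DEFAULT_VOTER", true, 3000000);
+ */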
+
+void destroy_votable(struct votable *votable)
+{
+ unsigned long flags;
+ int i;
+
+ if (!votable)
+ return;
+
+ spin_lock_irqsave(&votable_list_slock, flags);
+ list_del(&votable->list);
+ spin_unlock_irqrestore(&votable_list_slock, flags);
+
+ debugfs_remove_recursive(votable->root);
+
+ for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+ kfree(votable->client_strs[i]);
+
+ kfree(votable->name);
+ kfree(votable);
+}
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
new file mode 100644
index 0000000..a9c7040
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -0,0 +1,2939 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/log2.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/iio/consumer.h>
+#include <linux/pmic-voter.h>
+#include "smb5-reg.h"
+#include "smb5-lib.h"
+#include "schgm-flash.h"
+
+static struct smb_params smb5_pmi632_params = {
+ .fcc = {
+ .name = "fast charge current",
+ .reg = CHGR_FAST_CHARGE_CURRENT_CFG_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 50000,
+ },
+ .fv = {
+ .name = "float voltage",
+ .reg = CHGR_FLOAT_VOLTAGE_CFG_REG,
+ .min_u = 3600000,
+ .max_u = 4800000,
+ .step_u = 10000,
+ },
+ .usb_icl = {
+ .name = "usb input current limit",
+ .reg = USBIN_CURRENT_LIMIT_CFG_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 50000,
+ },
+ .icl_max_stat = {
+ .name = "dcdc icl max status",
+ .reg = ICL_MAX_STATUS_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 50000,
+ },
+ .icl_stat = {
+ .name = "input current limit status",
+ .reg = AICL_ICL_STATUS_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 50000,
+ },
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = DCDC_OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 500000,
+ .max_u = 1000000,
+ .step_u = 250000,
+ },
+ .dc_icl = {
+ .name = "DC input current limit",
+ .reg = DCDC_CFG_REF_MAX_PSNS_REG,
+ .min_u = 0,
+ .max_u = 1500000,
+ .step_u = 50000,
+ },
+ .jeita_cc_comp_hot = {
+ .name = "jeita fcc reduction",
+ .reg = JEITA_CCCOMP_CFG_HOT_REG,
+ .min_u = 0,
+ .max_u = 1575000,
+ .step_u = 25000,
+ },
+ .jeita_cc_comp_cold = {
+ .name = "jeita fcc reduction",
+ .reg = JEITA_CCCOMP_CFG_COLD_REG,
+ .min_u = 0,
+ .max_u = 1575000,
+ .step_u = 25000,
+ },
+ .freq_switcher = {
+ .name = "switching frequency",
+ .reg = DCDC_FSW_SEL_REG,
+ .min_u = 600,
+ .max_u = 1200,
+ .step_u = 400,
+ .set_proc = smblib_set_chg_freq,
+ },
+};
+
+static struct smb_params smb5_pm8150b_params = {
+ .fcc = {
+ .name = "fast charge current",
+ .reg = CHGR_FAST_CHARGE_CURRENT_CFG_REG,
+ .min_u = 0,
+ .max_u = 8000000,
+ .step_u = 50000,
+ },
+ .fv = {
+ .name = "float voltage",
+ .reg = CHGR_FLOAT_VOLTAGE_CFG_REG,
+ .min_u = 3600000,
+ .max_u = 4790000,
+ .step_u = 10000,
+ },
+ .usb_icl = {
+ .name = "usb input current limit",
+ .reg = USBIN_CURRENT_LIMIT_CFG_REG,
+ .min_u = 0,
+ .max_u = 5000000,
+ .step_u = 50000,
+ },
+ .icl_max_stat = {
+ .name = "dcdc icl max status",
+ .reg = ICL_MAX_STATUS_REG,
+ .min_u = 0,
+ .max_u = 5000000,
+ .step_u = 50000,
+ },
+ .icl_stat = {
+ .name = "aicl icl status",
+ .reg = AICL_ICL_STATUS_REG,
+ .min_u = 0,
+ .max_u = 5000000,
+ .step_u = 50000,
+ },
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = DCDC_OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 500000,
+ .max_u = 3000000,
+ .step_u = 500000,
+ },
+ .dc_icl = {
+ .name = "DC input current limit",
+ .reg = DCDC_CFG_REF_MAX_PSNS_REG,
+ .min_u = 0,
+ .max_u = 1500000,
+ .step_u = 50000,
+ },
+ .jeita_cc_comp_hot = {
+ .name = "jeita fcc reduction",
+ .reg = JEITA_CCCOMP_CFG_HOT_REG,
+ .min_u = 0,
+ .max_u = 8000000,
+ .step_u = 25000,
+ .set_proc = NULL,
+ },
+ .jeita_cc_comp_cold = {
+ .name = "jeita fcc reduction",
+ .reg = JEITA_CCCOMP_CFG_COLD_REG,
+ .min_u = 0,
+ .max_u = 8000000,
+ .step_u = 25000,
+ .set_proc = NULL,
+ },
+ .freq_switcher = {
+ .name = "switching frequency",
+ .reg = DCDC_FSW_SEL_REG,
+ .min_u = 600,
+ .max_u = 1200,
+ .step_u = 400,
+ .set_proc = smblib_set_chg_freq,
+ },
+};
+
+struct smb_dt_props {
+ int usb_icl_ua;
+ struct device_node *revid_dev_node;
+ enum float_options float_option;
+ int chg_inhibit_thr_mv;
+ bool no_battery;
+ bool hvdcp_disable;
+ int sec_charger_config;
+ int auto_recharge_soc;
+ int auto_recharge_vbat_mv;
+ int wd_bark_time;
+ int batt_profile_fcc_ua;
+ int batt_profile_fv_uv;
+ int term_current_src;
+ int term_current_thresh_hi_ma;
+ int term_current_thresh_lo_ma;
+};
+
+struct smb5 {
+ struct smb_charger chg;
+ struct dentry *dfs_root;
+ struct smb_dt_props dt;
+};
+
+static int __debug_mask;
+
+static ssize_t pd_disabled_show(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ struct smb5 *chip = dev_get_drvdata(dev);
+ struct smb_charger *chg = &chip->chg;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", chg->pd_disabled);
+}
+
+static ssize_t pd_disabled_store(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ int val;
+ struct smb5 *chip = dev_get_drvdata(dev);
+ struct smb_charger *chg = &chip->chg;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ chg->pd_disabled = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(pd_disabled);
+
+static ssize_t weak_chg_icl_ua_show(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ struct smb5 *chip = dev_get_drvdata(dev);
+ struct smb_charger *chg = &chip->chg;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", chg->weak_chg_icl_ua);
+}
+
+static ssize_t weak_chg_icl_ua_store(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ int val;
+ struct smb5 *chip = dev_get_drvdata(dev);
+ struct smb_charger *chg = &chip->chg;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ chg->weak_chg_icl_ua = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(weak_chg_icl_ua);
+
+static struct attribute *smb5_attrs[] = {
+ &dev_attr_pd_disabled.attr,
+ &dev_attr_weak_chg_icl_ua.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(smb5);
+
+#define PMI632_MAX_ICL_UA 3000000
+#define PM6150_MAX_FCC_UA 3000000
+static int smb5_chg_config_init(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+ int rc = 0;
+
+ revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PM8150B_SUBTYPE:
+ chip->chg.smb_version = PM8150B_SUBTYPE;
+ chg->param = smb5_pm8150b_params;
+ chg->name = "pm8150b_charger";
+ break;
+ case PM6150_SUBTYPE:
+ chip->chg.smb_version = PM6150_SUBTYPE;
+ chg->param = smb5_pm8150b_params;
+ chg->name = "pm6150_charger";
+ chg->wa_flags |= SW_THERM_REGULATION_WA;
+ chg->main_fcc_max = PM6150_MAX_FCC_UA;
+ break;
+ case PMI632_SUBTYPE:
+ chip->chg.smb_version = PMI632_SUBTYPE;
+ chg->param = smb5_pmi632_params;
+ chg->use_extcon = true;
+ chg->name = "pmi632_charger";
+ /* PMI632 does not support PD */
+ chg->pd_not_supported = true;
+ chg->hw_max_icl_ua =
+ (chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
+ : PMI632_MAX_ICL_UA;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ chg->chg_freq.freq_5V = 600;
+ chg->chg_freq.freq_6V_8V = 800;
+ chg->chg_freq.freq_9V = 1050;
+ chg->chg_freq.freq_12V = 1200;
+ chg->chg_freq.freq_removal = 1050;
+ chg->chg_freq.freq_below_otg_threshold = 800;
+ chg->chg_freq.freq_above_otg_threshold = 800;
+
+out:
+ of_node_put(revid_dev_node);
+ return rc;
+}
+
+#define MICRO_1P5A 1500000
+#define MICRO_P1A 100000
+#define MICRO_1PA 1000000
+#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
+#define DEFAULT_WD_BARK_TIME 64
+static int smb5_parse_dt(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct device_node *node = chg->dev->of_node;
+ int rc, byte_len;
+
+ if (!node) {
+ pr_err("device tree node missing\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(node, "qcom,sec-charger-config",
+ &chip->dt.sec_charger_config);
+ chg->sec_cp_present =
+ chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP ||
+ chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP_PL;
+
+ chg->sec_pl_present =
+ chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_PL ||
+ chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP_PL;
+
+ chg->step_chg_enabled = of_property_read_bool(node,
+ "qcom,step-charging-enable");
+
+ chg->sw_jeita_enabled = of_property_read_bool(node,
+ "qcom,sw-jeita-enable");
+
+ rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+ &chip->dt.wd_bark_time);
+ if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+ chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
+
+ chip->dt.no_battery = of_property_read_bool(node,
+ "qcom,batteryless-platform");
+
+ rc = of_property_read_u32(node,
+ "qcom,fcc-max-ua", &chip->dt.batt_profile_fcc_ua);
+ if (rc < 0)
+ chip->dt.batt_profile_fcc_ua = -EINVAL;
+
+ rc = of_property_read_u32(node,
+ "qcom,fv-max-uv", &chip->dt.batt_profile_fv_uv);
+ if (rc < 0)
+ chip->dt.batt_profile_fv_uv = -EINVAL;
+
+ rc = of_property_read_u32(node,
+ "qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+ if (rc < 0)
+ chip->dt.usb_icl_ua = -EINVAL;
+
+ rc = of_property_read_u32(node,
+ "qcom,otg-cl-ua", &chg->otg_cl_ua);
+ if (rc < 0)
+ chg->otg_cl_ua = (chip->chg.smb_version == PMI632_SUBTYPE) ?
+ MICRO_1PA : MICRO_1P5A;
+
+ rc = of_property_read_u32(node, "qcom,chg-term-src",
+ &chip->dt.term_current_src);
+ if (rc < 0)
+ chip->dt.term_current_src = ITERM_SRC_UNSPECIFIED;
+
+ rc = of_property_read_u32(node, "qcom,chg-term-current-ma",
+ &chip->dt.term_current_thresh_hi_ma);
+
+ if (chip->dt.term_current_src == ITERM_SRC_ADC)
+ rc = of_property_read_u32(node, "qcom,chg-term-base-current-ma",
+ &chip->dt.term_current_thresh_lo_ma);
+
+ if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+ chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+ GFP_KERNEL);
+
+ if (chg->thermal_mitigation == NULL)
+ return -ENOMEM;
+
+ chg->thermal_levels = byte_len / sizeof(u32);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chg->thermal_mitigation,
+ chg->thermal_levels);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(node, "qcom,charger-temp-max",
+ &chg->charger_temp_max);
+ if (rc < 0)
+ chg->charger_temp_max = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,smb-temp-max",
+ &chg->smb_temp_max);
+ if (rc < 0)
+ chg->smb_temp_max = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,float-option",
+ &chip->dt.float_option);
+ if (!rc && (chip->dt.float_option < 0 || chip->dt.float_option > 4)) {
+ pr_err("qcom,float-option is out of range [0, 4]\n");
+ return -EINVAL;
+ }
+
+ chip->dt.hvdcp_disable = of_property_read_bool(node,
+ "qcom,hvdcp-disable");
+
+
+ rc = of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+ &chip->dt.chg_inhibit_thr_mv);
+ if (!rc && (chip->dt.chg_inhibit_thr_mv < 0 ||
+ chip->dt.chg_inhibit_thr_mv > 300)) {
+ pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+ return -EINVAL;
+ }
+
+ chip->dt.auto_recharge_soc = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,auto-recharge-soc",
+ &chip->dt.auto_recharge_soc);
+ if (!rc && (chip->dt.auto_recharge_soc < 0 ||
+ chip->dt.auto_recharge_soc > 100)) {
+ pr_err("qcom,auto-recharge-soc is incorrect\n");
+ return -EINVAL;
+ }
+ chg->auto_recharge_soc = chip->dt.auto_recharge_soc;
+
+ chip->dt.auto_recharge_vbat_mv = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,auto-recharge-vbat-mv",
+ &chip->dt.auto_recharge_vbat_mv);
+ if (!rc && (chip->dt.auto_recharge_vbat_mv < 0)) {
+ pr_err("qcom,auto-recharge-vbat-mv is incorrect\n");
+ return -EINVAL;
+ }
+
+ chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+ chg->suspend_input_on_debug_batt = of_property_read_bool(node,
+ "qcom,suspend-input-on-debug-batt");
+
+ rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
+ &chg->otg_delay_ms);
+ if (rc < 0)
+ chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+
+ chg->fcc_stepper_enable = of_property_read_bool(node,
+ "qcom,fcc-stepping-enable");
+
+ /* Extract ADC channels */
+ rc = smblib_get_iio_channel(chg, "mid_voltage", &chg->iio.mid_chan);
+ if (rc < 0)
+ return rc;
+
+ if (!chg->iio.mid_chan) {
+ rc = smblib_get_iio_channel(chg, "usb_in_voltage",
+ &chg->iio.usbin_v_chan);
+ if (rc < 0)
+ return rc;
+
+ if (!chg->iio.usbin_v_chan) {
+ dev_err(chg->dev, "No voltage channel defined\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = smblib_get_iio_channel(chg, "chg_temp", &chg->iio.temp_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "usb_in_current",
+ &chg->iio.usbin_i_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "sbux_res", &chg->iio.sbux_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "vph_voltage", &chg->iio.vph_v_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "die_temp", &chg->iio.die_temp_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "conn_temp",
+ &chg->iio.connector_temp_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "skin_temp", &chg->iio.skin_temp_chan);
+ if (rc < 0)
+ return rc;
+
+ rc = smblib_get_iio_channel(chg, "smb_temp", &chg->iio.smb_temp_chan);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+static enum power_supply_property smb5_usb_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_TYPEC_MODE,
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+ POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+ POWER_SUPPLY_PROP_TYPEC_SRC_RP,
+ POWER_SUPPLY_PROP_LOW_POWER,
+ POWER_SUPPLY_PROP_PD_ACTIVE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_BOOST_CURRENT,
+ POWER_SUPPLY_PROP_PE_START,
+ POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ POWER_SUPPLY_PROP_REAL_TYPE,
+ POWER_SUPPLY_PROP_PR_SWAP,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONNECTOR_TYPE,
+ POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_SMB_EN_MODE,
+ POWER_SUPPLY_PROP_SMB_EN_REASON,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static int smb5_usb_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ union power_supply_propval pval;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smblib_get_prop_usb_present(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_usb_online(chg, val);
+ if (!val->intval)
+ break;
+
+ if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) ||
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+ && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
+ val->intval = 0;
+ else
+ val->intval = 1;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_get_prop_usb_voltage_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ rc = smblib_get_prop_usb_voltage_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+ val->intval = get_client_vote(chg->usb_icl_votable, PD_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_USB_PD;
+ break;
+ case POWER_SUPPLY_PROP_REAL_TYPE:
+ val->intval = chg->real_charger_type;
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ val->intval = POWER_SUPPLY_TYPEC_NONE;
+ else
+ val->intval = chg->typec_mode;
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+ else
+ rc = smblib_get_prop_typec_power_role(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ val->intval = 0;
+ else
+ rc = smblib_get_prop_typec_cc_orientation(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_SRC_RP:
+ rc = smblib_get_prop_typec_select_rp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_LOW_POWER:
+ if (chg->sink_src_mode == SRC_MODE)
+ rc = smblib_get_prop_low_power(chg, val);
+ else
+ rc = -ENODATA;
+ break;
+ case POWER_SUPPLY_PROP_PD_ACTIVE:
+ val->intval = chg->pd_active;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ rc = smblib_get_prop_usb_current_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ val->intval = chg->boost_current_ua;
+ break;
+ case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+ rc = smblib_get_prop_pd_in_hard_reset(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+ val->intval = chg->system_suspend_supported;
+ break;
+ case POWER_SUPPLY_PROP_PE_START:
+ rc = smblib_get_pe_start(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+ val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
+ rc = smblib_get_charge_current(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_PR_SWAP:
+ rc = smblib_get_prop_pr_swap_in_progress(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+ val->intval = chg->voltage_max_uv;
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+ val->intval = chg->voltage_min_uv;
+ break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ val->intval = get_client_vote(chg->usb_icl_votable,
+ USB_PSY_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_CONNECTOR_TYPE:
+ val->intval = chg->connector_type;
+ break;
+ case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+ if (chg->connector_health == -EINVAL)
+ val->intval = smblib_get_prop_connector_health(chg);
+ else
+ val->intval = chg->connector_health;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_UNKNOWN;
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0)
+ break;
+ val->intval = pval.intval ? POWER_SUPPLY_SCOPE_DEVICE
+ : chg->otg_present ? POWER_SUPPLY_SCOPE_SYSTEM
+ : POWER_SUPPLY_SCOPE_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_SMB_EN_MODE:
+ mutex_lock(&chg->smb_lock);
+ val->intval = chg->sec_chg_selected;
+ mutex_unlock(&chg->smb_lock);
+ break;
+ case POWER_SUPPLY_PROP_SMB_EN_REASON:
+ val->intval = chg->cp_reason;
+ break;
+ default:
+ pr_err("get prop %d is not supported in usb\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int smb5_usb_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+ rc = smblib_set_prop_pd_current_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+ rc = smblib_set_prop_typec_power_role(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_SRC_RP:
+ rc = smblib_set_prop_typec_select_rp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_ACTIVE:
+ rc = smblib_set_prop_pd_active(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+ rc = smblib_set_prop_pd_in_hard_reset(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+ chg->system_suspend_supported = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_BOOST_CURRENT:
+ rc = smblib_set_prop_boost_current(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+ rc = vote(chg->usb_icl_votable, CTM_VOTER,
+ val->intval >= 0, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_PR_SWAP:
+ rc = smblib_set_prop_pr_swap_in_progress(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MAX:
+ rc = smblib_set_prop_pd_voltage_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_VOLTAGE_MIN:
+ rc = smblib_set_prop_pd_voltage_min(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_SDP_CURRENT_MAX:
+ rc = smblib_set_prop_sdp_current_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+ chg->connector_health = val->intval;
+ power_supply_changed(chg->usb_psy);
+ break;
+ default:
+ pr_err("set prop %d is not supported\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smb5_usb_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc usb_psy_desc = {
+ .name = "usb",
+ .type = POWER_SUPPLY_TYPE_USB_PD,
+ .properties = smb5_usb_props,
+ .num_properties = ARRAY_SIZE(smb5_usb_props),
+ .get_property = smb5_usb_get_prop,
+ .set_property = smb5_usb_set_prop,
+ .property_is_writeable = smb5_usb_prop_is_writeable,
+};
+
+static int smb5_init_usb_psy(struct smb5 *chip)
+{
+ struct power_supply_config usb_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ usb_cfg.drv_data = chip;
+ usb_cfg.of_node = chg->dev->of_node;
+ chg->usb_psy = devm_power_supply_register(chg->dev,
+ &usb_psy_desc,
+ &usb_cfg);
+ if (IS_ERR(chg->usb_psy)) {
+ pr_err("Couldn't register USB power supply\n");
+ return PTR_ERR(chg->usb_psy);
+ }
+
+ return 0;
+}
+
+/********************************
+ * USB PC_PORT PSY REGISTRATION *
+ ********************************/
+static enum power_supply_property smb5_usb_port_props[] = {
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb5_usb_port_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_USB;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_usb_online(chg, val);
+ if (!val->intval)
+ break;
+
+ if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) ||
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+ && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = 5000000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
+ default:
+ pr_err_ratelimited("Get prop %d is not supported in pc_port\n",
+ psp);
+ return -EINVAL;
+ }
+
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int smb5_usb_port_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ switch (psp) {
+ default:
+ pr_err_ratelimited("Set prop %d is not supported in pc_port\n",
+ psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc usb_port_psy_desc = {
+ .name = "pc_port",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = smb5_usb_port_props,
+ .num_properties = ARRAY_SIZE(smb5_usb_port_props),
+ .get_property = smb5_usb_port_get_prop,
+ .set_property = smb5_usb_port_set_prop,
+};
+
+static int smb5_init_usb_port_psy(struct smb5 *chip)
+{
+ struct power_supply_config usb_port_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ usb_port_cfg.drv_data = chip;
+ usb_port_cfg.of_node = chg->dev->of_node;
+ chg->usb_port_psy = devm_power_supply_register(chg->dev,
+ &usb_port_psy_desc,
+ &usb_port_cfg);
+ if (IS_ERR(chg->usb_port_psy)) {
+ pr_err("Couldn't register USB pc_port power supply\n");
+ return PTR_ERR(chg->usb_port_psy);
+ }
+
+ return 0;
+}
+
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb5_usb_main_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+ POWER_SUPPLY_PROP_FCC_DELTA,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_TOGGLE_STAT,
+ POWER_SUPPLY_PROP_MAIN_FCC_MAX,
+};
+
+static int smb5_usb_main_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smblib_get_charge_param(chg, &chg->param.fcc,
+ &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_MAIN;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED:
+ rc = smblib_get_prop_input_voltage_settled(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_FCC_DELTA:
+ rc = smblib_get_prop_fcc_delta(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_icl_current(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ val->intval = chg->flash_active;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ rc = schgm_flash_get_vreg_ok(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TOGGLE_STAT:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_MAIN_FCC_MAX:
+ val->intval = chg->main_fcc_max;
+ break;
+ default:
+ pr_debug("get prop %d is not supported in usb-main\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int smb5_usb_main_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_set_icl_current(chg, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ chg->flash_active = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_TOGGLE_STAT:
+ rc = smblib_toggle_smb_en(chg, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MAIN_FCC_MAX:
+ chg->main_fcc_max = val->intval;
+ rerun_election(chg->fcc_votable);
+ break;
+ default:
+ pr_err("set prop %d is not supported\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int smb5_usb_main_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TOGGLE_STAT:
+ case POWER_SUPPLY_PROP_MAIN_FCC_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+ .name = "main",
+ .type = POWER_SUPPLY_TYPE_MAIN,
+ .properties = smb5_usb_main_props,
+ .num_properties = ARRAY_SIZE(smb5_usb_main_props),
+ .get_property = smb5_usb_main_get_prop,
+ .set_property = smb5_usb_main_set_prop,
+ .property_is_writeable = smb5_usb_main_prop_is_writeable,
+};
+
+static int smb5_init_usb_main_psy(struct smb5 *chip)
+{
+ struct power_supply_config usb_main_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ usb_main_cfg.drv_data = chip;
+ usb_main_cfg.of_node = chg->dev->of_node;
+ chg->usb_main_psy = devm_power_supply_register(chg->dev,
+ &usb_main_psy_desc,
+ &usb_main_cfg);
+ if (IS_ERR(chg->usb_main_psy)) {
+ pr_err("Couldn't register USB main power supply\n");
+ return PTR_ERR(chg->usb_main_psy);
+ }
+
+ return 0;
+}
+
+/*************************
+ * DC PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb5_dc_props[] = {
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ POWER_SUPPLY_PROP_REAL_TYPE,
+};
+
+static int smb5_dc_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ val->intval = get_effective_result(chg->dc_suspend_votable);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smblib_get_prop_dc_present(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_dc_online(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ rc = smblib_get_prop_dc_voltage_now(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_dc_current_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_get_prop_dc_voltage_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_REAL_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_WIPOWER;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+ return 0;
+}
+
+static int smb5_dc_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb5 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ rc = vote(chg->dc_suspend_votable, WBC_VOTER,
+ (bool)val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_set_prop_dc_current_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
+ rc = smblib_set_prop_voltage_wls_output(chg, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb5_dc_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int rc;
+
+ switch (psp) {
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+ .name = "dc",
+ .type = POWER_SUPPLY_TYPE_WIRELESS,
+ .properties = smb5_dc_props,
+ .num_properties = ARRAY_SIZE(smb5_dc_props),
+ .get_property = smb5_dc_get_prop,
+ .set_property = smb5_dc_set_prop,
+ .property_is_writeable = smb5_dc_prop_is_writeable,
+};
+
+static int smb5_init_dc_psy(struct smb5 *chip)
+{
+ struct power_supply_config dc_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ dc_cfg.drv_data = chip;
+ dc_cfg.of_node = chg->dev->of_node;
+ chg->dc_psy = devm_power_supply_register(chg->dev,
+ &dc_psy_desc,
+ &dc_cfg);
+ if (IS_ERR(chg->dc_psy)) {
+ pr_err("Couldn't register USB power supply\n");
+ return PTR_ERR(chg->dc_psy);
+ }
+
+ return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+static enum power_supply_property smb5_batt_props[] = {
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_DONE,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_DIE_HEALTH,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_RECHARGE_SOC,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_FORCE_RECHARGE,
+ POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
+};
+
+static int smb5_batt_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb_charger *chg = power_supply_get_drvdata(psy);
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ rc = smblib_get_prop_batt_status(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ rc = smblib_get_prop_batt_health(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smblib_get_prop_batt_present(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ rc = smblib_get_prop_input_suspend(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ rc = smblib_get_prop_batt_charge_type(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ rc = smblib_get_prop_batt_capacity(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ rc = smblib_get_prop_system_temp_level(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ rc = smblib_get_prop_system_temp_level_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ val->intval = chg->charger_temp_max;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ rc = smblib_get_prop_input_current_limited(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ val->intval = chg->step_chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+ val->intval = chg->sw_jeita_enabled;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = get_client_vote(chg->fv_votable,
+ BATT_PROFILE_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_CURRENT_NOW, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = get_client_vote(chg->fcc_votable,
+ BATT_PROFILE_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ rc = smblib_get_prop_batt_iterm(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_TEMP, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ rc = smblib_get_prop_batt_charge_done(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ val->intval = get_client_vote(chg->pl_disable_votable,
+ USER_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_DIE_HEALTH:
+ if (chg->die_health == -EINVAL)
+ val->intval = smblib_get_prop_die_health(chg);
+ else
+ val->intval = chg->die_health;
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chg->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER, val);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_CYCLE_COUNT, val);
+ break;
+ case POWER_SUPPLY_PROP_RECHARGE_SOC:
+ val->intval = chg->auto_recharge_soc;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_CHARGE_FULL, val);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_RECHARGE:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+ val->intval = chg->fcc_stepper_enable;
+ break;
+ default:
+ pr_err("batt power supply prop %d not supported\n", psp);
+ return -EINVAL;
+ }
+
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int smb5_batt_set_prop(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smb_charger *chg = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ rc = smblib_set_prop_batt_status(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ rc = smblib_set_prop_input_suspend(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ rc = smblib_set_prop_system_temp_level(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ rc = smblib_set_prop_batt_capacity(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ chg->batt_profile_fv_uv = val->intval;
+ vote(chg->fv_votable, BATT_PROFILE_VOTER, true, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ chg->step_chg_enabled = !!val->intval;
+ break;
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+ if (chg->sw_jeita_enabled != (!!val->intval)) {
+ rc = smblib_disable_hw_jeita(chg, !!val->intval);
+ if (rc == 0)
+ chg->sw_jeita_enabled = !!val->intval;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ chg->batt_profile_fcc_ua = val->intval;
+ vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ if (chg->pl.psy)
+ power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
+ rc = smblib_set_prop_ship_mode(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ rc = smblib_rerun_aicl(chg);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smblib_dp_dm(chg, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ rc = smblib_set_prop_input_current_limited(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_DIE_HEALTH:
+ chg->die_health = val->intval;
+ power_supply_changed(chg->batt_psy);
+ break;
+ case POWER_SUPPLY_PROP_RECHARGE_SOC:
+ rc = smblib_set_prop_rechg_soc_thresh(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_RECHARGE:
+ /* toggle charging to force recharge */
+ vote(chg->chg_disable_votable, FORCE_RECHARGE_VOTER,
+ true, 0);
+ /* charge disable delay */
+ msleep(50);
+ vote(chg->chg_disable_votable, FORCE_RECHARGE_VOTER,
+ false, 0);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb5_batt_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+ case POWER_SUPPLY_PROP_DIE_HEALTH:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = smb5_batt_props,
+ .num_properties = ARRAY_SIZE(smb5_batt_props),
+ .get_property = smb5_batt_get_prop,
+ .set_property = smb5_batt_set_prop,
+ .property_is_writeable = smb5_batt_prop_is_writeable,
+};
+
+static int smb5_init_batt_psy(struct smb5 *chip)
+{
+ struct power_supply_config batt_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ batt_cfg.drv_data = chg;
+ batt_cfg.of_node = chg->dev->of_node;
+ chg->batt_psy = devm_power_supply_register(chg->dev,
+ &batt_psy_desc,
+ &batt_cfg);
+ if (IS_ERR(chg->batt_psy)) {
+ pr_err("Couldn't register battery power supply\n");
+ return PTR_ERR(chg->batt_psy);
+ }
+
+ return rc;
+}
+
+/*******************************
+ * VBUS REGULATOR REGISTRATION *
+ *******************************/
+
+static struct regulator_ops smb5_vbus_reg_ops = {
+ .enable = smblib_vbus_regulator_enable,
+ .disable = smblib_vbus_regulator_disable,
+ .is_enabled = smblib_vbus_regulator_is_enabled,
+};
+
+static int smb5_init_vbus_regulator(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct regulator_config cfg = {};
+ int rc = 0;
+
+ chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+ GFP_KERNEL);
+ if (!chg->vbus_vreg)
+ return -ENOMEM;
+
+ cfg.dev = chg->dev;
+ cfg.driver_data = chip;
+
+ chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+ chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+ chg->vbus_vreg->rdesc.ops = &smb5_vbus_reg_ops;
+ chg->vbus_vreg->rdesc.of_match = "qcom,smb5-vbus";
+ chg->vbus_vreg->rdesc.name = "qcom,smb5-vbus";
+
+ chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+ &chg->vbus_vreg->rdesc, &cfg);
+ if (IS_ERR(chg->vbus_vreg->rdev)) {
+ rc = PTR_ERR(chg->vbus_vreg->rdev);
+ chg->vbus_vreg->rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't register VBUS regulator rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+/********************************
+ * VCONN REGULATOR REGISTRATION *
+ ********************************/
+
+static struct regulator_ops smb5_vconn_reg_ops = {
+ .enable = smblib_vconn_regulator_enable,
+ .disable = smblib_vconn_regulator_disable,
+ .is_enabled = smblib_vconn_regulator_is_enabled,
+};
+
+static int smb5_init_vconn_regulator(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct regulator_config cfg = {};
+ int rc = 0;
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ return 0;
+
+ chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+ GFP_KERNEL);
+ if (!chg->vconn_vreg)
+ return -ENOMEM;
+
+ cfg.dev = chg->dev;
+ cfg.driver_data = chip;
+
+ chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+ chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+ chg->vconn_vreg->rdesc.ops = &smb5_vconn_reg_ops;
+ chg->vconn_vreg->rdesc.of_match = "qcom,smb5-vconn";
+ chg->vconn_vreg->rdesc.name = "qcom,smb5-vconn";
+
+ chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+ &chg->vconn_vreg->rdesc, &cfg);
+ if (IS_ERR(chg->vconn_vreg->rdev)) {
+ rc = PTR_ERR(chg->vconn_vreg->rdev);
+ chg->vconn_vreg->rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't register VCONN regulator rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+static int smb5_configure_typec(struct smb_charger *chg)
+{
+ int rc;
+
+ /* disable apsd */
+ rc = smblib_configure_hvdcp_apsd(chg, false);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Use simple write to clear interrupts */
+ rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_1_REG, 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure Type-C interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Use simple write to enable only required interrupts */
+ rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+ TYPEC_SRC_BATT_HPWR_INT_EN_BIT |
+ TYPEC_WATER_DETECTION_INT_EN_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure Type-C interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+ EN_TRY_SNK_BIT, EN_TRY_SNK_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't enable try.snk rc=%d\n", rc);
+ return rc;
+ }
+ chg->typec_try_mode |= EN_TRY_SNK_BIT;
+
+ /* configure VCONN for software control */
+ rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+ VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+ VCONN_EN_SRC_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure VCONN for SW control rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+ USBIN_IN_COLLAPSE_GF_SEL_MASK | USBIN_AICL_STEP_TIMING_SEL_MASK,
+ 0);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't set USBIN_LOAD_CFG_REG rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smb5_configure_micro_usb(struct smb_charger *chg)
+{
+ int rc;
+
+ /* For micro USB connector, use extcon by default */
+ chg->use_extcon = true;
+ chg->pd_not_supported = true;
+
+ rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+ MICRO_USB_STATE_CHANGE_INT_EN_BIT,
+ MICRO_USB_STATE_CHANGE_INT_EN_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure Type-C interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Enable HVDCP and BC 1.2 source detection */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT,
+ HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't enable HVDCP detection rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
+{
+ u8 *buf;
+ int rc = 0;
+ s16 raw_hi_thresh, raw_lo_thresh;
+ struct smb_charger *chg = &chip->chg;
+
+ if (chip->dt.term_current_thresh_hi_ma < -10000 ||
+ chip->dt.term_current_thresh_hi_ma > 10000 ||
+ chip->dt.term_current_thresh_lo_ma < -10000 ||
+ chip->dt.term_current_thresh_lo_ma > 10000) {
+ dev_err(chg->dev, "ITERM threshold out of range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Conversion:
+ * raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000)
+ * Note: raw needs to be converted to big-endian format.
+ */
+
+ if (chip->dt.term_current_thresh_hi_ma) {
+ raw_hi_thresh = ((chip->dt.term_current_thresh_hi_ma *
+ ADC_CHG_TERM_MASK) / 10000);
+ raw_hi_thresh = sign_extend32(raw_hi_thresh, 15);
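+ /* swap the two bytes into the big-endian order the register expects */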
+ buf = (u8 *)&raw_hi_thresh;
+ raw_hi_thresh = buf[1] | (buf[0] << 8);
+
+ rc = smblib_batch_write(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG,
+ (u8 *)&raw_hi_thresh, 2);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure ITERM threshold HIGH rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->dt.term_current_thresh_lo_ma) {
+ raw_lo_thresh = ((chip->dt.term_current_thresh_lo_ma *
+ ADC_CHG_TERM_MASK) / 10000);
+ raw_lo_thresh = sign_extend32(raw_lo_thresh, 15);
+ buf = (u8 *)&raw_lo_thresh;
+ raw_lo_thresh = buf[1] | (buf[0] << 8);
+
+ rc = smblib_batch_write(chg, CHGR_ADC_ITERM_LO_THD_MSB_REG,
+ (u8 *)&raw_lo_thresh, 2);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure ITERM threshold LOW rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int smb5_configure_iterm_thresholds(struct smb5 *chip)
+{
+ int rc = 0;
+
+ switch (chip->dt.term_current_src) {
+ case ITERM_SRC_ADC:
+ rc = smb5_configure_iterm_thresholds_adc(chip);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int smb5_init_hw(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc, type = 0;
+ u8 val = 0;
+ union power_supply_propval pval;
+
+ if (chip->dt.no_battery)
+ chg->fake_capacity = 50;
+
+ if (chip->dt.batt_profile_fcc_ua < 0)
+ smblib_get_charge_param(chg, &chg->param.fcc,
+ &chg->batt_profile_fcc_ua);
+
+ if (chip->dt.batt_profile_fv_uv < 0)
+ smblib_get_charge_param(chg, &chg->param.fv,
+ &chg->batt_profile_fv_uv);
+
+ smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &chg->default_icl_ua);
+
+ if (chg->charger_temp_max == -EINVAL) {
+ rc = smblib_get_thermal_threshold(chg,
+ DIE_REG_H_THRESHOLD_MSB_REG,
+ &chg->charger_temp_max);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Disable SMB Temperature ADC INT */
+ rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_SMB_ADC_SRC_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure SMB thermal regulation rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * If SW thermal regulation WA is active then all the HW temperature
+ * comparators need to be disabled to prevent HW thermal regulation,
+ * apart from DIE_TEMP analog comparator for SHDN regulation.
+ */
+ if (chg->wa_flags & SW_THERM_REGULATION_WA) {
+ rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_DIE_CMP_SRC_EN_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable HW thermal regulation rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Use SW based VBUS control, disable HW autonomous mode */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+ HVDCP_AUTH_ALG_EN_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * PMI632 can have the connector type defined by a dedicated register
+ * TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
+ */
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ rc = smblib_read(chg, TYPEC_MICRO_USB_MODE_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
+ return rc;
+ }
+ type = !!(val & MICRO_USB_MODE_ONLY_BIT);
+ }
+
+ /*
+ * If micro-USB mode was not indicated by TYPEC_MICRO_USB_MODE_REG, and
+ * on all non-PMI632 parts, check the connector type using
+ * TYPEC_U_USB_CFG_REG.
+ */
+ if (!type) {
+ rc = smblib_read(chg, TYPEC_U_USB_CFG_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read U_USB config rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ type = !!(val & EN_MICRO_USB_MODE_BIT);
+ }
+
+ pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
+
+ if (type) {
+ chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
+ rc = smb5_configure_micro_usb(chg);
+ } else {
+ chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
+ rc = smb5_configure_typec(chg);
+ }
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * PMI632 based hw init:
+ * - Rerun APSD to ensure proper charger detection if device
+ * boots with charger connected.
+ * - Initialize flash module for PMI632
+ */
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ schgm_flash_init(chg);
+ smblib_rerun_apsd_if_required(chg);
+ }
+
+ /* clear the ICL override if it is set */
+ rc = smblib_icl_override(chg, false);
+ if (rc < 0) {
+ pr_err("Couldn't disable ICL override rc=%d\n", rc);
+ return rc;
+ }
+
+ /* set OTG current limit */
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl, chg->otg_cl_ua);
+ if (rc < 0) {
+ pr_err("Couldn't set otg current limit rc=%d\n", rc);
+ return rc;
+ }
+
+ /* vote 0mA on usb_icl for platforms without a battery */
+ vote(chg->usb_icl_votable,
+ DEFAULT_VOTER, chip->dt.no_battery, 0);
+ vote(chg->dc_suspend_votable,
+ DEFAULT_VOTER, chip->dt.no_battery, 0);
+ vote(chg->fcc_votable, HW_LIMIT_VOTER,
+ chip->dt.batt_profile_fcc_ua > 0, chip->dt.batt_profile_fcc_ua);
+ vote(chg->fv_votable, HW_LIMIT_VOTER,
+ chip->dt.batt_profile_fv_uv > 0, chip->dt.batt_profile_fv_uv);
+ vote(chg->fcc_votable,
+ BATT_PROFILE_VOTER, chg->batt_profile_fcc_ua > 0,
+ chg->batt_profile_fcc_ua);
+ vote(chg->fv_votable,
+ BATT_PROFILE_VOTER, chg->batt_profile_fv_uv > 0,
+ chg->batt_profile_fv_uv);
+
+ /* Some hardware limits the maximum supported ICL */
+ vote(chg->usb_icl_votable, HW_LIMIT_VOTER,
+ chg->hw_max_icl_ua > 0, chg->hw_max_icl_ua);
+
+ /* set DC icl_max 1A */
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl, 1000000);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't set dc_icl rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * AICL configuration:
+ * start from the minimum setting, keep the AICL ADC disabled,
+ * and enable periodic AICL rerun.
+ */
+ if (chg->smb_version != PMI632_SUBTYPE) {
+ rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+ USBIN_AICL_PERIODIC_RERUN_EN_BIT
+ | USBIN_AICL_ADC_EN_BIT | USBIN_AICL_EN_BIT,
+ USBIN_AICL_PERIODIC_RERUN_EN_BIT
+ | USBIN_AICL_EN_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_write(chg, AICL_RERUN_TIME_CFG_REG,
+ AICL_RERUN_TIME_12S_VAL);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure AICL rerun interval rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable the charging path */
+ rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
+ return rc;
+ }
+
+ /* configure VBUS for software control */
+ rc = smblib_masked_write(chg, DCDC_OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure VBUS for SW control rc=%d\n", rc);
+ return rc;
+ }
+
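+ /*
+ * Encode the bark timeout as log2(wd_bark_time / 16) in the BARK
+ * field; the bite timeout is fixed at 8s (BITE_WDOG_TIMEOUT_8S).
+ */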
+ val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT)
+ & BARK_WDOG_TIMEOUT_MASK;
+ val |= BITE_WDOG_TIMEOUT_8S;
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+ BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+ val);
+ if (rc < 0) {
+ pr_err("Couldn't configure WD config rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable WD BARK and enable it on plugin */
+ rc = smblib_masked_write(chg, WD_CFG_REG,
+ WATCHDOG_TRIGGER_AFP_EN_BIT |
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT,
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't configure WD config rc=%d\n", rc);
+ return rc;
+ }
+
+ /* set termination current threshold values */
+ rc = smb5_configure_iterm_thresholds(chip);
+ if (rc < 0) {
+ pr_err("Couldn't configure ITERM thresholds rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* configure float charger options */
+ switch (chip->dt.float_option) {
+ case FLOAT_DCP:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, 0);
+ break;
+ case FLOAT_SDP:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+ break;
+ case DISABLE_CHARGING:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+ break;
+ case SUSPEND_INPUT:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (chip->dt.chg_inhibit_thr_mv) {
+ case 50:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ INHIBIT_ANALOG_VFLT_MINUS_50MV);
+ break;
+ case 100:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ INHIBIT_ANALOG_VFLT_MINUS_100MV);
+ break;
+ case 200:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ INHIBIT_ANALOG_VFLT_MINUS_200MV);
+ break;
+ case 300:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ INHIBIT_ANALOG_VFLT_MINUS_300MV);
+ break;
+ case 0:
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+ CHARGER_INHIBIT_BIT, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG,
+ FAST_CHARGE_SAFETY_TIMER_768_MIN);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
+ (chip->dt.auto_recharge_vbat_mv != -EINVAL) ?
+ VBAT_BASED_RECHG_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure VBAT-rechg CHG_CFG2_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* program the auto-recharge VBAT threshold */
+ if (chip->dt.auto_recharge_vbat_mv != -EINVAL) {
+ u32 temp = VBAT_TO_VRAW_ADC(chip->dt.auto_recharge_vbat_mv);
+
+ temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+ rc = smblib_batch_write(chg,
+ CHGR_ADC_RECHARGE_THRESHOLD_MSB_REG, (u8 *)&temp, 2);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure ADC_RECHARGE_THRESHOLD REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* Program the sample count for VBAT based recharge to 3 */
+ rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
+ NO_OF_SAMPLE_FOR_RCHG,
+ 2 << NO_OF_SAMPLE_FOR_RCHG_SHIFT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
+ (chip->dt.auto_recharge_soc != -EINVAL) ?
+ SOC_BASED_RECHG_BIT : VBAT_BASED_RECHG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure SOC-rechg CHG_CFG2_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* program the auto-recharge threshold */
+ if (chip->dt.auto_recharge_soc != -EINVAL) {
+ pval.intval = chip->dt.auto_recharge_soc;
+ rc = smblib_set_prop_rechg_soc_thresh(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHG_RCHG_SOC_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Program the sample count for SOC based recharge to 1 */
+ rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
+ NO_OF_SAMPLE_FOR_RCHG, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* set the Source (OTG) mode current limit */
+ rc = smblib_masked_write(chg, DCDC_OTG_CURRENT_LIMIT_CFG_REG,
+ OTG_CURRENT_LIMIT_MASK, OTG_CURRENT_LIMIT_3000_MA);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DCDC_OTG_CURRENT_LIMIT_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (chg->sw_jeita_enabled) {
+ rc = smblib_disable_hw_jeita(chg, true);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, DCDC_ENG_SDCDC_CFG5_REG,
+ ENG_SDCDC_BAT_HPWR_MASK, BOOST_MODE_THRESH_3P6_V);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DCDC_ENG_SDCDC_CFG5 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smb5_post_init(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ union power_supply_propval pval;
+ int rc;
+
+ /*
+ * In case the usb path is suspended, we would have missed disabling
+ * the icl change interrupt because the interrupt may not have been
+ * requested yet.
+ */
+ rerun_election(chg->usb_icl_votable);
+
+ /* configure power role for dual-role */
+ pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DRP role rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rerun_election(chg->usb_irq_enable_votable);
+
+ return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static int smb5_determine_initial_status(struct smb5 *chip)
+{
+ struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+ struct smb_charger *chg = &chip->chg;
+ union power_supply_propval val;
+ int rc;
+
+ rc = smblib_get_prop_usb_present(chg, &val);
+ if (rc < 0) {
+ pr_err("Couldn't get usb present rc=%d\n", rc);
+ return rc;
+ }
+ chg->early_usb_attach = val.intval;
+
+ if (chg->bms_psy)
+ smblib_suspend_on_debug_battery(chg);
+
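+ /* run each IRQ handler once so SW state is synced with the current HW status */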
+ usb_plugin_irq_handler(0, &irq_data);
+ typec_attach_detach_irq_handler(0, &irq_data);
+ typec_state_change_irq_handler(0, &irq_data);
+ usb_source_change_irq_handler(0, &irq_data);
+ chg_state_change_irq_handler(0, &irq_data);
+ icl_change_irq_handler(0, &irq_data);
+ batt_temp_changed_irq_handler(0, &irq_data);
+ wdog_bark_irq_handler(0, &irq_data);
+ typec_or_rid_detection_change_irq_handler(0, &irq_data);
+ wdog_snarl_irq_handler(0, &irq_data);
+
+ return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb5_irqs[] = {
+ /* CHARGER IRQs */
+ [CHGR_ERROR_IRQ] = {
+ .name = "chgr-error",
+ .handler = default_irq_handler,
+ },
+ [CHG_STATE_CHANGE_IRQ] = {
+ .name = "chg-state-change",
+ .handler = chg_state_change_irq_handler,
+ .wake = true,
+ },
+ [STEP_CHG_STATE_CHANGE_IRQ] = {
+ .name = "step-chg-state-change",
+ },
+ [STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+ .name = "step-chg-soc-update-fail",
+ },
+ [STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+ .name = "step-chg-soc-update-req",
+ },
+ [FG_FVCAL_QUALIFIED_IRQ] = {
+ .name = "fg-fvcal-qualified",
+ },
+ [VPH_ALARM_IRQ] = {
+ .name = "vph-alarm",
+ },
+ [VPH_DROP_PRECHG_IRQ] = {
+ .name = "vph-drop-prechg",
+ },
+ /* DCDC IRQs */
+ [OTG_FAIL_IRQ] = {
+ .name = "otg-fail",
+ .handler = default_irq_handler,
+ },
+ [OTG_OC_DISABLE_SW_IRQ] = {
+ .name = "otg-oc-disable-sw",
+ },
+ [OTG_OC_HICCUP_IRQ] = {
+ .name = "otg-oc-hiccup",
+ },
+ [BSM_ACTIVE_IRQ] = {
+ .name = "bsm-active",
+ },
+ [HIGH_DUTY_CYCLE_IRQ] = {
+ .name = "high-duty-cycle",
+ .handler = high_duty_cycle_irq_handler,
+ .wake = true,
+ },
+ [INPUT_CURRENT_LIMITING_IRQ] = {
+ .name = "input-current-limiting",
+ .handler = default_irq_handler,
+ },
+ [CONCURRENT_MODE_DISABLE_IRQ] = {
+ .name = "concurrent-mode-disable",
+ },
+ [SWITCHER_POWER_OK_IRQ] = {
+ .name = "switcher-power-ok",
+ .handler = switcher_power_ok_irq_handler,
+ },
+ /* BATTERY IRQs */
+ [BAT_TEMP_IRQ] = {
+ .name = "bat-temp",
+ .handler = batt_temp_changed_irq_handler,
+ .wake = true,
+ },
+ [ALL_CHNL_CONV_DONE_IRQ] = {
+ .name = "all-chnl-conv-done",
+ },
+ [BAT_OV_IRQ] = {
+ .name = "bat-ov",
+ .handler = batt_psy_changed_irq_handler,
+ },
+ [BAT_LOW_IRQ] = {
+ .name = "bat-low",
+ .handler = batt_psy_changed_irq_handler,
+ },
+ [BAT_THERM_OR_ID_MISSING_IRQ] = {
+ .name = "bat-therm-or-id-missing",
+ .handler = batt_psy_changed_irq_handler,
+ },
+ [BAT_TERMINAL_MISSING_IRQ] = {
+ .name = "bat-terminal-missing",
+ .handler = batt_psy_changed_irq_handler,
+ },
+ [BUCK_OC_IRQ] = {
+ .name = "buck-oc",
+ },
+ [VPH_OV_IRQ] = {
+ .name = "vph-ov",
+ },
+ /* USB INPUT IRQs */
+ [USBIN_COLLAPSE_IRQ] = {
+ .name = "usbin-collapse",
+ .handler = default_irq_handler,
+ },
+ [USBIN_VASHDN_IRQ] = {
+ .name = "usbin-vashdn",
+ .handler = default_irq_handler,
+ },
+ [USBIN_UV_IRQ] = {
+ .name = "usbin-uv",
+ .handler = usbin_uv_irq_handler,
+ },
+ [USBIN_OV_IRQ] = {
+ .name = "usbin-ov",
+ .handler = default_irq_handler,
+ },
+ [USBIN_PLUGIN_IRQ] = {
+ .name = "usbin-plugin",
+ .handler = usb_plugin_irq_handler,
+ .wake = true,
+ },
+ [USBIN_REVI_CHANGE_IRQ] = {
+ .name = "usbin-revi-change",
+ },
+ [USBIN_SRC_CHANGE_IRQ] = {
+ .name = "usbin-src-change",
+ .handler = usb_source_change_irq_handler,
+ .wake = true,
+ },
+ [USBIN_ICL_CHANGE_IRQ] = {
+ .name = "usbin-icl-change",
+ .handler = icl_change_irq_handler,
+ .wake = true,
+ },
+ /* DC INPUT IRQs */
+ [DCIN_VASHDN_IRQ] = {
+ .name = "dcin-vashdn",
+ },
+ [DCIN_UV_IRQ] = {
+ .name = "dcin-uv",
+ .handler = default_irq_handler,
+ },
+ [DCIN_OV_IRQ] = {
+ .name = "dcin-ov",
+ .handler = default_irq_handler,
+ },
+ [DCIN_PLUGIN_IRQ] = {
+ .name = "dcin-plugin",
+ .handler = dc_plugin_irq_handler,
+ .wake = true,
+ },
+ [DCIN_REVI_IRQ] = {
+ .name = "dcin-revi",
+ },
+ [DCIN_PON_IRQ] = {
+ .name = "dcin-pon",
+ .handler = default_irq_handler,
+ },
+ [DCIN_EN_IRQ] = {
+ .name = "dcin-en",
+ .handler = default_irq_handler,
+ },
+ /* TYPEC IRQs */
+ [TYPEC_OR_RID_DETECTION_CHANGE_IRQ] = {
+ .name = "typec-or-rid-detect-change",
+ .handler = typec_or_rid_detection_change_irq_handler,
+ .wake = true,
+ },
+ [TYPEC_VPD_DETECT_IRQ] = {
+ .name = "typec-vpd-detect",
+ },
+ [TYPEC_CC_STATE_CHANGE_IRQ] = {
+ .name = "typec-cc-state-change",
+ .handler = typec_state_change_irq_handler,
+ .wake = true,
+ },
+ [TYPEC_VCONN_OC_IRQ] = {
+ .name = "typec-vconn-oc",
+ .handler = default_irq_handler,
+ },
+ [TYPEC_VBUS_CHANGE_IRQ] = {
+ .name = "typec-vbus-change",
+ },
+ [TYPEC_ATTACH_DETACH_IRQ] = {
+ .name = "typec-attach-detach",
+ .handler = typec_attach_detach_irq_handler,
+ .wake = true,
+ },
+ [TYPEC_LEGACY_CABLE_DETECT_IRQ] = {
+ .name = "typec-legacy-cable-detect",
+ .handler = default_irq_handler,
+ },
+ [TYPEC_TRY_SNK_SRC_DETECT_IRQ] = {
+ .name = "typec-try-snk-src-detect",
+ },
+ /* MISCELLANEOUS IRQs */
+ [WDOG_SNARL_IRQ] = {
+ .name = "wdog-snarl",
+ .handler = wdog_snarl_irq_handler,
+ .wake = true,
+ },
+ [WDOG_BARK_IRQ] = {
+ .name = "wdog-bark",
+ .handler = wdog_bark_irq_handler,
+ .wake = true,
+ },
+ [AICL_FAIL_IRQ] = {
+ .name = "aicl-fail",
+ },
+ [AICL_DONE_IRQ] = {
+ .name = "aicl-done",
+ .handler = default_irq_handler,
+ },
+ [SMB_EN_IRQ] = {
+ .name = "smb-en",
+ },
+ [IMP_TRIGGER_IRQ] = {
+ .name = "imp-trigger",
+ },
+ [TEMP_CHANGE_IRQ] = {
+ .name = "temp-change",
+ },
+ [TEMP_CHANGE_SMB_IRQ] = {
+ .name = "temp-change-smb",
+ },
+ /* FLASH */
+ [VREG_OK_IRQ] = {
+ .name = "vreg-ok",
+ },
+ [ILIM_S2_IRQ] = {
+ .name = "ilim2-s2",
+ .handler = schgm_flash_ilim2_irq_handler,
+ },
+ [ILIM_S1_IRQ] = {
+ .name = "ilim1-s1",
+ },
+ [VOUT_DOWN_IRQ] = {
+ .name = "vout-down",
+ },
+ [VOUT_UP_IRQ] = {
+ .name = "vout-up",
+ },
+ [FLASH_STATE_CHANGE_IRQ] = {
+ .name = "flash-state-change",
+ .handler = schgm_flash_state_change_irq_handler,
+ },
+ [TORCH_REQ_IRQ] = {
+ .name = "torch-req",
+ },
+ [FLASH_EN_IRQ] = {
+ .name = "flash-en",
+ },
+};
+
+static int smb5_get_irq_index_byname(const char *irq_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb5_irqs); i++) {
+ if (strcmp(smb5_irqs[i].name, irq_name) == 0)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static int smb5_request_interrupt(struct smb5 *chip,
+ struct device_node *node, const char *irq_name)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc, irq, irq_index;
+ struct smb_irq_data *irq_data;
+
+ irq = of_irq_get_byname(node, irq_name);
+ if (irq < 0) {
+ pr_err("Couldn't get irq %s by name\n", irq_name);
+ return irq;
+ }
+
+ irq_index = smb5_get_irq_index_byname(irq_name);
+ if (irq_index < 0) {
+ pr_err("%s is not a defined irq\n", irq_name);
+ return irq_index;
+ }
+
+ if (!smb5_irqs[irq_index].handler)
+ return 0;
+
+ irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ irq_data->parent_data = chip;
+ irq_data->name = irq_name;
+ irq_data->storm_data = smb5_irqs[irq_index].storm_data;
+ mutex_init(&irq_data->storm_data.storm_lock);
+
+ rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+ smb5_irqs[irq_index].handler,
+ IRQF_ONESHOT, irq_name, irq_data);
+ if (rc < 0) {
+ pr_err("Couldn't request irq %d\n", irq);
+ return rc;
+ }
+
+ smb5_irqs[irq_index].irq = irq;
+ smb5_irqs[irq_index].irq_data = irq_data;
+ if (smb5_irqs[irq_index].wake)
+ enable_irq_wake(irq);
+
+ return rc;
+}
+
+static int smb5_request_interrupts(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ struct device_node *node = chg->dev->of_node;
+ struct device_node *child;
+ int rc = 0;
+ const char *name;
+ struct property *prop;
+
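+ /* walk each child node and request every IRQ listed in its "interrupt-names" property */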
+ for_each_available_child_of_node(node, child) {
+ of_property_for_each_string(child, "interrupt-names",
+ prop, name) {
+ rc = smb5_request_interrupt(chip, child, name);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+ chg->usb_icl_change_irq_enabled = true;
+
+ /*
+ * Disable WDOG SNARL IRQ by default to prevent IRQ storm. If required
+ * for any application, enable it through votable.
+ */
+ if (chg->irq_info[WDOG_SNARL_IRQ].irq)
+ vote(chg->wdog_snarl_irq_en_votable, DEFAULT_VOTER, false, 0);
+
+ return rc;
+}
+
+static void smb5_free_interrupts(struct smb_charger *chg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb5_irqs); i++) {
+ if (smb5_irqs[i].irq > 0) {
+ if (smb5_irqs[i].wake)
+ disable_irq_wake(smb5_irqs[i].irq);
+
+ devm_free_irq(chg->dev, smb5_irqs[i].irq,
+ smb5_irqs[i].irq_data);
+ }
+ }
+}
+
+static void smb5_disable_interrupts(struct smb_charger *chg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb5_irqs); i++) {
+ if (smb5_irqs[i].irq > 0)
+ disable_irq(smb5_irqs[i].irq);
+ }
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int force_batt_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->batt_psy);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(force_batt_psy_update_ops, NULL,
+ force_batt_psy_update_write, "0x%02llx\n");
+
+static int force_usb_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->usb_psy);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(force_usb_psy_update_ops, NULL,
+ force_usb_psy_update_write, "0x%02llx\n");
+
+static int force_dc_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->dc_psy);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(force_dc_psy_update_ops, NULL,
+ force_dc_psy_update_write, "0x%02llx\n");
+
+static void smb5_create_debugfs(struct smb5 *chip)
+{
+ struct dentry *file;
+
+ chip->dfs_root = debugfs_create_dir("charger", NULL);
+ if (IS_ERR_OR_NULL(chip->dfs_root)) {
+ pr_err("Couldn't create charger debugfs rc=%ld\n",
+ (long)chip->dfs_root);
+ return;
+ }
+
+ file = debugfs_create_file("force_batt_psy_update", 0600,
+ chip->dfs_root, chip, &force_batt_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_batt_psy_update file rc=%ld\n",
+ (long)file);
+
+ file = debugfs_create_file("force_usb_psy_update", 0600,
+ chip->dfs_root, chip, &force_usb_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_usb_psy_update file rc=%ld\n",
+ (long)file);
+
+ file = debugfs_create_file("force_dc_psy_update", 0600,
+ chip->dfs_root, chip, &force_dc_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+ (long)file);
+
+ file = debugfs_create_u32("debug_mask", 0600, chip->dfs_root,
+ &__debug_mask);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create debug_mask file rc=%ld\n", (long)file);
+}
+
+#else
+
+static void smb5_create_debugfs(struct smb5 *chip)
+{}
+
+#endif
+
+static int smb5_show_charger_status(struct smb5 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ union power_supply_propval val;
+ int usb_present, batt_present, batt_health, batt_charge_type;
+ int rc;
+
+ rc = smblib_get_prop_usb_present(chg, &val);
+ if (rc < 0) {
+ pr_err("Couldn't get usb present rc=%d\n", rc);
+ return rc;
+ }
+ usb_present = val.intval;
+
+ rc = smblib_get_prop_batt_present(chg, &val);
+ if (rc < 0) {
+ pr_err("Couldn't get batt present rc=%d\n", rc);
+ return rc;
+ }
+ batt_present = val.intval;
+
+ rc = smblib_get_prop_batt_health(chg, &val);
+ if (rc < 0) {
+ pr_err("Couldn't get batt health rc=%d\n", rc);
+ val.intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ batt_health = val.intval;
+
+ rc = smblib_get_prop_batt_charge_type(chg, &val);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ return rc;
+ }
+ batt_charge_type = val.intval;
+
+ pr_info("SMB5 status - usb:present=%d type=%d batt:present=%d health=%d charge=%d\n",
+ usb_present, chg->real_charger_type,
+ batt_present, batt_health, batt_charge_type);
+ return rc;
+}
+
+static int smb5_probe(struct platform_device *pdev)
+{
+ struct smb5 *chip;
+ struct smb_charger *chg;
+ int rc = 0;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chg = &chip->chg;
+ chg->dev = &pdev->dev;
+ chg->debug_mask = &__debug_mask;
+ chg->pd_disabled = 0;
+ chg->weak_chg_icl_ua = 500000;
+ chg->mode = PARALLEL_MASTER;
+ chg->irq_info = smb5_irqs;
+ chg->die_health = -EINVAL;
+ chg->connector_health = -EINVAL;
+ chg->otg_present = false;
+ chg->main_fcc_max = -EINVAL;
+
+ chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
+ if (!chg->regmap) {
+ pr_err("parent regmap is missing\n");
+ return -EINVAL;
+ }
+
+ rc = smb5_chg_config_init(chip);
+ if (rc < 0) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't setup chg_config rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smb5_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ return rc;
+ }
+
+ if (alarmtimer_get_rtcdev())
+ alarm_init(&chg->lpd_recheck_timer, ALARM_REALTIME,
+ smblib_lpd_recheck_timer);
+ else
+ return -EPROBE_DEFER;
+
+ rc = smblib_init(chg);
+ if (rc < 0) {
+ pr_err("smblib_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* set driver data before resources request it */
+ platform_set_drvdata(pdev, chip);
+
+ /* extcon registration */
+ chg->extcon = devm_extcon_dev_allocate(chg->dev, smblib_extcon_cable);
+ if (IS_ERR(chg->extcon)) {
+ rc = PTR_ERR(chg->extcon);
+ dev_err(chg->dev, "failed to allocate extcon device rc=%d\n",
+ rc);
+ goto cleanup;
+ }
+
+ rc = devm_extcon_dev_register(chg->dev, chg->extcon);
+ if (rc < 0) {
+ dev_err(chg->dev, "failed to register extcon device rc=%d\n",
+ rc);
+ goto cleanup;
+ }
+
+ rc = smb5_init_hw(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize hardware rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ /*
+ * VBUS regulator enablement/disablement for host mode is handled
+ * by USB-PD driver only. For micro-USB and non-PD typeC designs,
+ * the VBUS regulator is enabled/disabled by the smb driver itself
+ * before sending extcon notifications.
+ * Hence, register vbus and vconn regulators for PD supported designs
+ * only.
+ */
+ if (!chg->pd_not_supported) {
+ rc = smb5_init_vbus_regulator(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize vbus regulator rc=%d\n",
+ rc);
+ goto cleanup;
+ }
+
+ rc = smb5_init_vconn_regulator(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize vconn regulator rc=%d\n",
+ rc);
+ goto cleanup;
+ }
+ }
+
+ switch (chg->smb_version) {
+ case PM8150B_SUBTYPE:
+ case PM6150_SUBTYPE:
+ rc = smb5_init_dc_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+ goto cleanup;
+ }
+ break;
+ default:
+ break;
+ }
+
+ rc = smb5_init_usb_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb5_init_usb_main_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb5_init_usb_port_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize usb pc_port psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb5_init_batt_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb5_determine_initial_status(chip);
+ if (rc < 0) {
+ pr_err("Couldn't determine initial status rc=%d\n",
+ rc);
+ goto cleanup;
+ }
+
+ rc = smb5_request_interrupts(chip);
+ if (rc < 0) {
+ pr_err("Couldn't request interrupts rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb5_post_init(chip);
+ if (rc < 0) {
+ pr_err("Failed in post init rc=%d\n", rc);
+ goto free_irq;
+ }
+
+ smb5_create_debugfs(chip);
+
+ rc = sysfs_create_groups(&chg->dev->kobj, smb5_groups);
+ if (rc < 0) {
+ pr_err("Couldn't create sysfs files rc=%d\n", rc);
+ goto free_irq;
+ }
+
+ rc = smb5_show_charger_status(chip);
+ if (rc < 0) {
+ pr_err("Failed to get charger status rc=%d\n", rc);
+ goto free_irq;
+ }
+
+ device_init_wakeup(chg->dev, true);
+
+ pr_info("QPNP SMB5 probed successfully\n");
+
+ return rc;
+
+free_irq:
+ smb5_free_interrupts(chg);
+cleanup:
+ smblib_deinit(chg);
+ platform_set_drvdata(pdev, NULL);
+
+ return rc;
+}
+
+static int smb5_remove(struct platform_device *pdev)
+{
+ struct smb5 *chip = platform_get_drvdata(pdev);
+ struct smb_charger *chg = &chip->chg;
+
+ /* force enable APSD */
+ smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ BC1P2_SRC_DETECT_BIT, BC1P2_SRC_DETECT_BIT);
+
+ smb5_free_interrupts(chg);
+ smblib_deinit(chg);
+ sysfs_remove_groups(&chg->dev->kobj, smb5_groups);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void smb5_shutdown(struct platform_device *pdev)
+{
+ struct smb5 *chip = platform_get_drvdata(pdev);
+ struct smb_charger *chg = &chip->chg;
+
+ /* disable all interrupts */
+ smb5_disable_interrupts(chg);
+
+ /* configure power role for UFP */
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
+ smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, EN_SNK_ONLY_BIT);
+
+ /* force HVDCP to 5V */
+ smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+ smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+
+ /* force enable APSD */
+ smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ BC1P2_SRC_DETECT_BIT, BC1P2_SRC_DETECT_BIT);
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,qpnp-smb5", },
+ { },
+};
+
+static struct platform_driver smb5_driver = {
+ .driver = {
+ .name = "qcom,qpnp-smb5",
+ .of_match_table = match_table,
+ },
+ .probe = smb5_probe,
+ .remove = smb5_remove,
+ .shutdown = smb5_shutdown,
+};
+module_platform_driver(smb5_driver);
+
+MODULE_DESCRIPTION("QPNP SMB5 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/schgm-flash.c b/drivers/power/supply/qcom/schgm-flash.c
new file mode 100644
index 0000000..201265a
--- /dev/null
+++ b/drivers/power/supply/qcom/schgm-flash.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "SCHG-FLASH: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+#include <linux/pmic-voter.h>
+#include "smb5-lib.h"
+#include "schgm-flash.h"
+
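+/* true if value lies within [left, right], regardless of which bound is larger */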
+#define IS_BETWEEN(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
+irqreturn_t schgm_flash_default_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+
+ pr_debug("IRQ: %s\n", irq_data->name);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t schgm_flash_ilim2_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+
+ rc = smblib_write(chg, SCHGM_FLASH_S2_LATCH_RESET_CMD_REG,
+ FLASH_S2_LATCH_RESET_BIT);
+ if (rc < 0)
+ pr_err("Couldn't reset S2_LATCH reset rc=%d\n", rc);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t schgm_flash_state_change_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+ u8 reg;
+
+ rc = smblib_read(chg, SCHGM_FLASH_STATUS_3_REG, &reg);
+ if (rc < 0)
+ pr_err("Couldn't read flash status_3 rc=%d\n", rc);
+ else
+ pr_debug("Flash status changed state=[%x]\n",
+ (reg & FLASH_STATE_MASK));
+
+ return IRQ_HANDLED;
+}
+
+#define FIXED_MODE 0
+#define ADAPTIVE_MODE 1
+static void schgm_flash_parse_dt(struct smb_charger *chg)
+{
+ struct device_node *node = chg->dev->of_node;
+ u32 val;
+ int rc;
+
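+ /* SOC thresholds are given in percent and scaled to the 0-255 register range */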
+ chg->flash_derating_soc = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,flash-derating-soc", &val);
+ if (!rc) {
+ if (IS_BETWEEN(0, 100, val))
+ chg->flash_derating_soc = (val * 255) / 100;
+ }
+
+ chg->flash_disable_soc = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,flash-disable-soc", &val);
+ if (!rc) {
+ if (IS_BETWEEN(0, 100, val))
+ chg->flash_disable_soc = (val * 255) / 100;
+ }
+
+ chg->headroom_mode = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,headroom-mode", &val);
+ if (!rc) {
+ if (IS_BETWEEN(FIXED_MODE, ADAPTIVE_MODE, val))
+ chg->headroom_mode = val;
+ }
+}
+
+int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val)
+{
+ int rc, vreg_state;
+ u8 stat = 0;
+
+ if (!chg->flash_init_done)
+ return -EPERM;
+
+ rc = smblib_read(chg, SCHGM_FLASH_STATUS_2_REG, &stat);
+ if (rc < 0) {
+ pr_err("Couldn't read FLASH STATUS_2 rc=%d\n", rc);
+ return rc;
+ }
+ vreg_state = !!(stat & VREG_OK_BIT);
+
+ /* If VREG_OK is not set check for flash error */
+ if (!vreg_state) {
+ rc = smblib_read(chg, SCHGM_FLASH_STATUS_3_REG, &stat);
+ if (rc < 0) {
+ pr_err("Couldn't read FLASH_STATUS_3 rc=%d\n", rc);
+ return rc;
+ }
+ if ((stat & FLASH_STATE_MASK) == FLASH_ERROR_VAL) {
+ vreg_state = -EFAULT;
+ rc = smblib_read(chg, SCHGM_FLASH_STATUS_5_REG,
+ &stat);
+ if (rc < 0) {
+ pr_err("Couldn't read FLASH_STATUS_5 rc=%d\n",
+ rc);
+ return rc;
+ }
+ pr_debug("Flash error: status=%x\n", stat);
+ }
+ }
+
+ /*
+ * val can be one of the following:
+ * 1 - VREG_OK is set.
+ * 0 - VREG_OK is 0 but no Flash error.
+ * -EFAULT - Flash Error is set.
+ */
+ *val = vreg_state;
+
+ return 0;
+}
+
+int schgm_flash_init(struct smb_charger *chg)
+{
+ int rc;
+ u8 reg;
+
+ schgm_flash_parse_dt(chg);
+
+ if (chg->flash_derating_soc != -EINVAL) {
+ rc = smblib_write(chg, SCHGM_SOC_BASED_FLASH_DERATE_TH_CFG_REG,
+ chg->flash_derating_soc);
+ if (rc < 0) {
+ pr_err("Couldn't configure SOC for flash derating rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chg->flash_disable_soc != -EINVAL) {
+ rc = smblib_write(chg, SCHGM_SOC_BASED_FLASH_DISABLE_TH_CFG_REG,
+ chg->flash_disable_soc);
+ if (rc < 0) {
+ pr_err("Couldn't configure SOC for flash disable rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chg->headroom_mode != -EINVAL) {
+ /*
+ * configure headroom management policy for
+ * flash and torch mode.
+ */
+ reg = (chg->headroom_mode == FIXED_MODE)
+ ? FORCE_FLASH_BOOST_5V_BIT : 0;
+ rc = smblib_write(chg, SCHGM_FORCE_BOOST_CONTROL, reg);
+ if (rc < 0) {
+ pr_err("Couldn't write force boost control reg rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ reg = (chg->headroom_mode == FIXED_MODE)
+ ? TORCH_PRIORITY_CONTROL_BIT : 0;
+ rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL, reg);
+ if (rc < 0) {
+ pr_err("Couldn't force 5V boost in torch mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if ((chg->flash_derating_soc != -EINVAL)
+ || (chg->flash_disable_soc != -EINVAL)) {
+ /* Check if SOC based derating/disable is enabled */
+ rc = smblib_read(chg, SCHGM_FLASH_CONTROL_REG, &reg);
+ if (rc < 0) {
+ pr_err("Couldn't read flash control reg rc=%d\n", rc);
+ return rc;
+ }
+ if (!(reg & SOC_LOW_FOR_FLASH_EN_BIT))
+ pr_warn("Soc based flash derating not enabled\n");
+ }
+
+ chg->flash_init_done = true;
+
+ return 0;
+}
diff --git a/drivers/power/supply/qcom/schgm-flash.h b/drivers/power/supply/qcom/schgm-flash.h
new file mode 100644
index 0000000..546e63a
--- /dev/null
+++ b/drivers/power/supply/qcom/schgm-flash.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SCHGM_FLASH_H__
+#define __SCHGM_FLASH_H__
+
+#include <linux/bitops.h>
+
+#define SCHGM_FLASH_BASE 0xA600
+
+#define SCHGM_FLASH_STATUS_2_REG (SCHGM_FLASH_BASE + 0x07)
+#define VREG_OK_BIT BIT(4)
+
+#define SCHGM_FLASH_STATUS_3_REG (SCHGM_FLASH_BASE + 0x08)
+#define FLASH_STATE_MASK GENMASK(2, 0)
+#define FLASH_ERROR_VAL 0x7
+
+#define SCHGM_FLASH_INT_RT_STS_REG (SCHGM_FLASH_BASE + 0x10)
+
+#define SCHGM_FLASH_STATUS_5_REG (SCHGM_FLASH_BASE + 0x0B)
+
+#define SCHGM_FORCE_BOOST_CONTROL (SCHGM_FLASH_BASE + 0x41)
+#define FORCE_FLASH_BOOST_5V_BIT BIT(0)
+
+#define SCHGM_FLASH_S2_LATCH_RESET_CMD_REG (SCHGM_FLASH_BASE + 0x44)
+#define FLASH_S2_LATCH_RESET_BIT BIT(0)
+
+#define SCHGM_FLASH_CONTROL_REG (SCHGM_FLASH_BASE + 0x60)
+#define SOC_LOW_FOR_FLASH_EN_BIT BIT(7)
+
+#define SCHGM_TORCH_PRIORITY_CONTROL (SCHGM_FLASH_BASE + 0x63)
+#define TORCH_PRIORITY_CONTROL_BIT BIT(0)
+
+#define SCHGM_SOC_BASED_FLASH_DERATE_TH_CFG_REG (SCHGM_FLASH_BASE + 0x67)
+
+#define SCHGM_SOC_BASED_FLASH_DISABLE_TH_CFG_REG \
+ (SCHGM_FLASH_BASE + 0x68)
+
+int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val);
+int schgm_flash_init(struct smb_charger *chg);
+
+irqreturn_t schgm_flash_default_irq_handler(int irq, void *data);
+irqreturn_t schgm_flash_ilim2_irq_handler(int irq, void *data);
+irqreturn_t schgm_flash_state_change_irq_handler(int irq, void *data);
+#endif /* __SCHGM_FLASH_H__ */
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
new file mode 100644
index 0000000..89c3154
--- /dev/null
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -0,0 +1,5605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/irq.h>
+#include <linux/iio/consumer.h>
+#include <linux/pmic-voter.h>
+#include <linux/of_batterydata.h>
+#include "smb5-lib.h"
+#include "smb5-reg.h"
+#include "battery.h"
+#include "step-chg-jeita.h"
+#include "storm-watch.h"
+
+#define smblib_err(chg, fmt, ...) \
+ pr_err("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__)
+
+#define smblib_dbg(chg, reason, fmt, ...) \
+ do { \
+ if (*chg->debug_mask & (reason)) \
+ pr_info("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__); \
+ else \
+ pr_debug("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define typec_rp_med_high(chg, typec_mode) \
+ ((typec_mode == POWER_SUPPLY_TYPEC_SOURCE_MEDIUM \
+ || typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH) \
+ && !chg->typec_legacy)
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
+{
+ unsigned int value;
+ int rc = 0;
+
+ rc = regmap_read(chg->regmap, addr, &value);
+ if (rc >= 0)
+ *val = (u8)value;
+
+ return rc;
+}
+
+int smblib_batch_read(struct smb_charger *chg, u16 addr, u8 *val,
+ int count)
+{
+ return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val)
+{
+ return regmap_write(chg->regmap, addr, val);
+}
+
+int smblib_batch_write(struct smb_charger *chg, u16 addr, u8 *val,
+ int count)
+{
+ return regmap_bulk_write(chg->regmap, addr, val, count);
+}
+
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
+{
+ return regmap_update_bits(chg->regmap, addr, mask, val);
+}
+
+int smblib_get_iio_channel(struct smb_charger *chg, const char *propname,
+ struct iio_channel **chan)
+{
+ int rc = 0;
+
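+ /* a channel name missing from "io-channel-names" is treated as optional, not an error */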
+ rc = of_property_match_string(chg->dev->of_node,
+ "io-channel-names", propname);
+ if (rc < 0)
+ return 0;
+
+ *chan = iio_channel_get(chg->dev, propname);
+ if (IS_ERR(*chan)) {
+ rc = PTR_ERR(*chan);
+ if (rc != -EPROBE_DEFER)
+ smblib_err(chg, "%s channel unavailable, %d\n",
+ propname, rc);
+ *chan = NULL;
+ }
+
+ return rc;
+}
+
+#define DIV_FACTOR_MICRO_V_I 1
+#define DIV_FACTOR_MILI_V_I 1000
+#define DIV_FACTOR_DECIDEGC 100
+int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan,
+ int div, int *data)
+{
+ int rc = 0;
+ *data = -ENODATA;
+
+ if (chan) {
+ rc = iio_read_channel_processed(chan, data);
+ if (rc < 0) {
+ smblib_err(chg, "Error in reading IIO channel data, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (div != 0)
+ *data /= div;
+ }
+
+ return rc;
+}
+
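+/*
+ * Report the charge-current reduction applied by JEITA compensation
+ * while the battery is in the soft hot/cold temperature bands.
+ */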
+int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+ int rc, cc_minus_ua;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_7 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (stat & BAT_TEMP_STATUS_HOT_SOFT_BIT) {
+ rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp_hot,
+ &cc_minus_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else if (stat & BAT_TEMP_STATUS_COLD_SOFT_BIT) {
+ rc = smblib_get_charge_param(chg,
+ &chg->param.jeita_cc_comp_cold,
+ &cc_minus_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ cc_minus_ua = 0;
+ }
+
+ *cc_delta_ua = -cc_minus_ua;
+
+ return 0;
+}
+
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+ ICL_OVERRIDE_AFTER_APSD_BIT,
+ override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+
+ return rc;
+}
+
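+/*
+ * Route the secondary charging path to either the charge pump (CP) or the
+ * parallel slave charger (PL), or disable it, keeping the parallel-disable
+ * vote in sync.
+ */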
+static int smblib_select_sec_charger(struct smb_charger *chg, int sec_chg)
+{
+ int rc;
+
+ if (sec_chg == chg->sec_chg_selected)
+ return 0;
+
+ switch (sec_chg) {
+ case POWER_SUPPLY_CHARGER_SEC_CP:
+ vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0);
+
+ /* select Charge Pump instead of slave charger */
+ rc = smblib_masked_write(chg, MISC_SMB_CFG_REG,
+ SMB_EN_SEL_BIT, SMB_EN_SEL_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't select SMB charger rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* Enable Charge Pump, under HW control */
+ rc = smblib_write(chg, MISC_SMB_EN_CMD_REG, EN_CP_CMD_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ case POWER_SUPPLY_CHARGER_SEC_PL:
+ /* select slave charger instead of Charge Pump */
+ rc = smblib_masked_write(chg, MISC_SMB_CFG_REG,
+ SMB_EN_SEL_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't select SMB charger rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* Enable slave charger, under HW control */
+ rc = smblib_write(chg, MISC_SMB_EN_CMD_REG, EN_STAT_CMD_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, false, 0);
+
+ break;
+ case POWER_SUPPLY_CHARGER_SEC_NONE:
+ default:
+ vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0);
+
+ /* SW override, disabling secondary charger(s) */
+ rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,
+ SMB_EN_OVERRIDE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ }
+
+ chg->sec_chg_selected = sec_chg;
+
+ return rc;
+}
+
+static void smblib_notify_extcon_props(struct smb_charger *chg, int id)
+{
+ union extcon_property_value val;
+ union power_supply_propval prop_val;
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC) {
+ smblib_get_prop_typec_cc_orientation(chg, &prop_val);
+ val.intval = ((prop_val.intval == 2) ? 1 : 0);
+ extcon_set_property(chg->extcon, id,
+ EXTCON_PROP_USB_TYPEC_POLARITY, val);
+ }
+
+ val.intval = true;
+ extcon_set_property(chg->extcon, id,
+ EXTCON_PROP_USB_SS, val);
+}
+
+static void smblib_notify_device_mode(struct smb_charger *chg, bool enable)
+{
+ if (enable)
+ smblib_notify_extcon_props(chg, EXTCON_USB);
+
+ extcon_set_state_sync(chg->extcon, EXTCON_USB, enable);
+}
+
+static void smblib_notify_usb_host(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
+ if (enable) {
+ smblib_dbg(chg, PR_OTG, "enabling VBUS in OTG mode\n");
+ rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't enable VBUS in OTG mode rc=%d\n", rc);
+ return;
+ }
+
+ smblib_notify_extcon_props(chg, EXTCON_USB_HOST);
+ } else {
+ smblib_dbg(chg, PR_OTG, "disabling VBUS in OTG mode\n");
+ rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG,
+ OTG_EN_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't disable VBUS in OTG mode rc=%d\n",
+ rc);
+ return;
+ }
+ }
+
+ extcon_set_state_sync(chg->extcon, EXTCON_USB_HOST, enable);
+}
+
+/********************
+ * REGISTER GETTERS *
+ ********************/
+
+int smblib_get_charge_param(struct smb_charger *chg,
+ struct smb_chg_param *param, int *val_u)
+{
+ int rc = 0;
+ u8 val_raw;
+
+ rc = smblib_read(chg, param->reg, &val_raw);
+ if (rc < 0) {
+ smblib_err(chg, "%s: Couldn't read from 0x%04x rc=%d\n",
+ param->name, param->reg, rc);
+ return rc;
+ }
+
+ if (param->get_proc)
+ *val_u = param->get_proc(param, val_raw);
+ else
+ *val_u = val_raw * param->step_u + param->min_u;
+ smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+ param->name, *val_u, val_raw);
+
+ return rc;
+}
+
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
+{
+ int rc = 0;
+ u8 temp;
+
+ rc = smblib_read(chg, USBIN_CMD_IL_REG, &temp);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
+ return rc;
+ }
+ *suspend = temp & USBIN_SUSPEND_BIT;
+
+ return rc;
+}
+
+
+static const s16 therm_lookup_table[] = {
+ /* Index -30C~85C, ADC raw code */
+ 0x6C92, 0x6C43, 0x6BF0, 0x6B98, 0x6B3A, 0x6AD8, 0x6A70, 0x6A03,
+ 0x6990, 0x6916, 0x6897, 0x6811, 0x6785, 0x66F2, 0x6658, 0x65B7,
+ 0x650F, 0x6460, 0x63AA, 0x62EC, 0x6226, 0x6159, 0x6084, 0x5FA8,
+ 0x5EC3, 0x5DD8, 0x5CE4, 0x5BE9, 0x5AE7, 0x59DD, 0x58CD, 0x57B5,
+ 0x5696, 0x5571, 0x5446, 0x5314, 0x51DD, 0x50A0, 0x4F5E, 0x4E17,
+ 0x4CCC, 0x4B7D, 0x4A2A, 0x48D4, 0x477C, 0x4621, 0x44C4, 0x4365,
+ 0x4206, 0x40A6, 0x3F45, 0x3DE6, 0x3C86, 0x3B28, 0x39CC, 0x3872,
+ 0x3719, 0x35C4, 0x3471, 0x3322, 0x31D7, 0x308F, 0x2F4C, 0x2E0D,
+ 0x2CD3, 0x2B9E, 0x2A6E, 0x2943, 0x281D, 0x26FE, 0x25E3, 0x24CF,
+ 0x23C0, 0x22B8, 0x21B5, 0x20B8, 0x1FC2, 0x1ED1, 0x1DE6, 0x1D01,
+ 0x1C22, 0x1B49, 0x1A75, 0x19A8, 0x18E0, 0x181D, 0x1761, 0x16A9,
+ 0x15F7, 0x154A, 0x14A2, 0x13FF, 0x1361, 0x12C8, 0x1234, 0x11A4,
+ 0x1119, 0x1091, 0x100F, 0x0F90, 0x0F15, 0x0E9E, 0x0E2B, 0x0DBC,
+ 0x0D50, 0x0CE8, 0x0C83, 0x0C21, 0x0BC3, 0x0B67, 0x0B0F, 0x0AB9,
+ 0x0A66, 0x0A16, 0x09C9, 0x097E,
+};
+
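+/*
+ * Convert a 16-bit thermal threshold register value to a temperature in
+ * deci-degC by binary-searching the (monotonically decreasing) ADC lookup
+ * table above; index 0 maps to -30 degC and each entry is one degC apart.
+ */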
+int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val)
+{
+ u8 buff[2];
+ s16 temp;
+ int rc = 0;
+ int i, lower, upper;
+
+ rc = smblib_batch_read(chg, addr, buff, 2);
+ if (rc < 0) {
+ pr_err("failed to write to 0x%04X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ temp = buff[1] | buff[0] << 8;
+
+ lower = 0;
+ upper = ARRAY_SIZE(therm_lookup_table) - 1;
+ while (lower <= upper) {
+ i = (upper + lower) / 2;
+ if (therm_lookup_table[i] < temp)
+ upper = i - 1;
+ else if (therm_lookup_table[i] > temp)
+ lower = i + 1;
+ else
+ break;
+ }
+
+ /* index 0 corresponds to -30C */
+ *val = (i - 30) * 10;
+
+ return rc;
+}
+
+struct apsd_result {
+ const char * const name;
+ const u8 bit;
+ const enum power_supply_type pst;
+};
+
+enum {
+ UNKNOWN,
+ SDP,
+ CDP,
+ DCP,
+ OCP,
+ FLOAT,
+ HVDCP2,
+ HVDCP3,
+ MAX_TYPES
+};
+
+static const struct apsd_result smblib_apsd_results[] = {
+ [UNKNOWN] = {
+ .name = "UNKNOWN",
+ .bit = 0,
+ .pst = POWER_SUPPLY_TYPE_UNKNOWN
+ },
+ [SDP] = {
+ .name = "SDP",
+ .bit = SDP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB
+ },
+ [CDP] = {
+ .name = "CDP",
+ .bit = CDP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_CDP
+ },
+ [DCP] = {
+ .name = "DCP",
+ .bit = DCP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_DCP
+ },
+ [OCP] = {
+ .name = "OCP",
+ .bit = OCP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_DCP
+ },
+ [FLOAT] = {
+ .name = "FLOAT",
+ .bit = FLOAT_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_FLOAT
+ },
+ [HVDCP2] = {
+ .name = "HVDCP2",
+ .bit = DCP_CHARGER_BIT | QC_2P0_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_HVDCP
+ },
+ [HVDCP3] = {
+ .name = "HVDCP3",
+ .bit = DCP_CHARGER_BIT | QC_3P0_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_HVDCP_3,
+ },
+};
+
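+/*
+ * Translate the latched APSD result bits into a charger type. If the QC
+ * detection bit is also set, promote a plain DCP result to HVDCP2 unless
+ * HVDCP3 was already matched.
+ */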
+static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
+{
+ int rc, i;
+ u8 apsd_stat, stat;
+ const struct apsd_result *result = &smblib_apsd_results[UNKNOWN];
+
+ rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+ return result;
+ }
+ smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat);
+
+ if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT))
+ return result;
+
+ rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
+ rc);
+ return result;
+ }
+ stat &= APSD_RESULT_STATUS_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(smblib_apsd_results); i++) {
+ if (smblib_apsd_results[i].bit == stat)
+ result = &smblib_apsd_results[i];
+ }
+
+ if (apsd_stat & QC_CHARGER_BIT) {
+ /* since it's a QC charger, return either HVDCP3 or HVDCP2 */
+ if (result != &smblib_apsd_results[HVDCP3])
+ result = &smblib_apsd_results[HVDCP2];
+ }
+
+ return result;
+}
+
+#define INPUT_NOT_PRESENT 0
+#define INPUT_PRESENT_USB BIT(1)
+#define INPUT_PRESENT_DC BIT(2)
+static int smblib_is_input_present(struct smb_charger *chg,
+ int *present)
+{
+ int rc;
+ union power_supply_propval pval = {0, };
+
+ *present = INPUT_NOT_PRESENT;
+
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get usb presence status rc=%d\n", rc);
+ return rc;
+ }
+ *present |= pval.intval ? INPUT_PRESENT_USB : INPUT_NOT_PRESENT;
+
+ rc = smblib_get_prop_dc_present(chg, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get dc presence status rc=%d\n", rc);
+ return rc;
+ }
+ *present |= pval.intval ? INPUT_PRESENT_DC : INPUT_NOT_PRESENT;
+
+ return 0;
+}
+
+/********************
+ * REGISTER SETTERS *
+ ********************/
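+/* Supported buck/boost switching frequencies and their raw register codes */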
+static const struct buck_boost_freq chg_freq_list[] = {
+ [0] = {
+ .freq_khz = 2400,
+ .val = 7,
+ },
+ [1] = {
+ .freq_khz = 2100,
+ .val = 8,
+ },
+ [2] = {
+ .freq_khz = 1600,
+ .val = 11,
+ },
+ [3] = {
+ .freq_khz = 1200,
+ .val = 15,
+ },
+};
+
+int smblib_set_chg_freq(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ u8 i;
+
+ if (val_u > param->max_u || val_u < param->min_u)
+ return -EINVAL;
+
+ /* Charger FSW is the configured frequency / 2 */
+ val_u *= 2;
+ for (i = 0; i < ARRAY_SIZE(chg_freq_list); i++) {
+ if (chg_freq_list[i].freq_khz == val_u)
+ break;
+ }
+ if (i == ARRAY_SIZE(chg_freq_list)) {
+ pr_err("Invalid frequency %d Hz\n", val_u / 2);
+ return -EINVAL;
+ }
+
+ *val_raw = chg_freq_list[i].val;
+
+ return 0;
+}
+
+int smblib_set_opt_switcher_freq(struct smb_charger *chg, int fsw_khz)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_switcher, fsw_khz);
+ if (rc < 0)
+ dev_err(chg->dev, "Error in setting freq_buck rc=%d\n", rc);
+
+ if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
+ pval.intval = fsw_khz;
+ /*
+ * Some parallel charging implementations may not have
+ * PROP_BUCK_FREQ property - they could be running
+ * with a fixed frequency
+ */
+ power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
+ }
+
+ return rc;
+}
+
+int smblib_set_charge_param(struct smb_charger *chg,
+ struct smb_chg_param *param, int val_u)
+{
+ int rc = 0;
+ u8 val_raw;
+
+ if (param->set_proc) {
+ rc = param->set_proc(param, val_u, &val_raw);
+ if (rc < 0)
+ return -EINVAL;
+ } else {
+ if (val_u > param->max_u || val_u < param->min_u)
+ smblib_dbg(chg, PR_MISC,
+ "%s: %d is out of range [%d, %d]\n",
+ param->name, val_u, param->min_u, param->max_u);
+
+ if (val_u > param->max_u)
+ val_u = param->max_u;
+ if (val_u < param->min_u)
+ val_u = param->min_u;
+
+ val_raw = (val_u - param->min_u) / param->step_u;
+ }
+
+ rc = smblib_write(chg, param->reg, val_raw);
+ if (rc < 0) {
+ smblib_err(chg, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+ param->name, val_raw, param->reg, rc);
+ return rc;
+ }
+
+ smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+ param->name, val_u, val_raw);
+
+ return rc;
+}
+
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
+{
+ int rc = 0;
+ int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
+ if (suspend && irq) {
+ if (chg->usb_icl_change_irq_enabled) {
+ disable_irq_nosync(irq);
+ chg->usb_icl_change_irq_enabled = false;
+ }
+ }
+
+ rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+ suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
+ suspend ? "suspend" : "resume", rc);
+
+ if (!suspend && irq) {
+ if (!chg->usb_icl_change_irq_enabled) {
+ enable_irq(irq);
+ chg->usb_icl_change_irq_enabled = true;
+ }
+ }
+
+ return rc;
+}
+
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend)
+{
+ int rc = 0;
+
+ rc = smblib_masked_write(chg, DCIN_CMD_IL_REG, DCIN_SUSPEND_BIT,
+ suspend ? DCIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
+ suspend ? "suspend" : "resume", rc);
+
+ return rc;
+}
+
+static int smblib_set_adapter_allowance(struct smb_charger *chg,
+ u8 allowed_voltage)
+{
+ int rc = 0;
+
+ rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
+ allowed_voltage, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define MICRO_5V 5000000
+#define MICRO_9V 9000000
+#define MICRO_12V 12000000
+static int smblib_set_usb_pd_fsw(struct smb_charger *chg, int voltage)
+{
+ int rc = 0;
+
+ if (voltage == MICRO_5V)
+ rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_5V);
+ else if (voltage > MICRO_5V && voltage < MICRO_9V)
+ rc = smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_6V_8V);
+ else if (voltage >= MICRO_9V && voltage < MICRO_12V)
+ rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_9V);
+ else if (voltage == MICRO_12V)
+ rc = smblib_set_opt_switcher_freq(chg, chg->chg_freq.freq_12V);
+ else {
+ smblib_err(chg, "Couldn't set Fsw: invalid voltage %d\n",
+ voltage);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
+ int min_allowed_uv, int max_allowed_uv)
+{
+ int rc;
+ u8 allowed_voltage;
+
+ if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V;
+ } else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+ } else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_12V;
+ } else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+ } else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_12V;
+ } else if (min_allowed_uv < MICRO_12V && max_allowed_uv <= MICRO_12V) {
+ allowed_voltage = USBIN_ADAPTER_ALLOW_9V_TO_12V;
+ } else {
+ smblib_err(chg, "invalid allowed voltage [%d, %d]\n",
+ min_allowed_uv, max_allowed_uv);
+ return -EINVAL;
+ }
+
+ rc = smblib_set_adapter_allowance(chg, allowed_voltage);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure adapter allowance rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/********************
+ * HELPER FUNCTIONS *
+ ********************/
+
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy, psp, val);
+
+ return rc;
+}
+
+int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable)
+{
+ int rc;
+ u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT;
+
+ if (chg->pd_not_supported)
+ return 0;
+
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask,
+ enable ? mask : 0);
+ if (rc < 0)
+ smblib_err(chg, "failed to write USBIN_OPTIONS_1_CFG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ rc = PTR_ERR(chg->dpdm_reg);
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+ rc);
+ chg->dpdm_reg = NULL;
+ return rc;
+ }
+ }
+
+ if (enable) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+ } else {
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+ }
+
+ return rc;
+}
+
+static void smblib_rerun_apsd(struct smb_charger *chg)
+{
+ int rc;
+
+ smblib_dbg(chg, PR_MISC, "re-running APSD\n");
+
+ rc = smblib_masked_write(chg, CMD_APSD_REG,
+ APSD_RERUN_BIT, APSD_RERUN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
+}
+
+static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
+{
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+ /* if PD is active, APSD is disabled and won't have a valid result */
+ if (chg->pd_active) {
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
+ } else {
+ /*
+ * Update the real charger type only if this is not a FLOAT
+ * charger that was already detected as SDP
+ */
+ if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
+ chg->real_charger_type = apsd_result->pst;
+ }
+
+ smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
+ apsd_result->name, chg->pd_active);
+ return apsd_result;
+}
+
+static int smblib_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct smb_charger *chg = container_of(nb, struct smb_charger, nb);
+
+ if (!strcmp(psy->desc->name, "bms")) {
+ if (!chg->bms_psy)
+ chg->bms_psy = psy;
+ if (ev == PSY_EVENT_PROP_CHANGED)
+ schedule_work(&chg->bms_update_work);
+ }
+
+ if (!chg->jeita_configured)
+ schedule_work(&chg->jeita_update_work);
+
+ if (chg->sec_pl_present && !chg->pl.psy
+ && !strcmp(psy->desc->name, "parallel")) {
+ chg->pl.psy = psy;
+ schedule_work(&chg->pl_update_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int smblib_register_notifier(struct smb_charger *chg)
+{
+ int rc;
+
+ chg->nb.notifier_call = smblib_notifier_call;
+ rc = power_supply_reg_notifier(&chg->nb);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ if (val_u > param->max_u || val_u < param->min_u)
+ return -EINVAL;
+
+ *val_raw = val_u << 1;
+
+ return 0;
+}
+
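+/*
+ * The CC-delta field is a signed quantity packed into an unsigned register:
+ * decoded values above max_u wrap around to represent negative deltas,
+ * hence the +/- (max_u * 2) adjustments in the two mappings below.
+ */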
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+ u8 val_raw)
+{
+ int val_u = val_raw * param->step_u + param->min_u;
+
+ if (val_u > param->max_u)
+ val_u -= param->max_u * 2;
+
+ return val_u;
+}
+
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ if (val_u > param->max_u || val_u < param->min_u - param->max_u)
+ return -EINVAL;
+
+ val_u += param->max_u * 2 - param->min_u;
+ val_u %= param->max_u * 2;
+ *val_raw = val_u / param->step_u;
+
+ return 0;
+}
+
+#define SDP_100_MA 100000
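+/*
+ * Micro-USB removal: fall back to the default secondary charger selection,
+ * drop all input current/voltage votes (ICL returns to 100mA), restore the
+ * default adapter allowance and FLOAT charger configuration, and undo any
+ * QC2 non-compliant-adapter workaround.
+ */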
+static void smblib_uusb_removal(struct smb_charger *chg)
+{
+ int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
+
+ mutex_lock(&chg->smb_lock);
+ chg->cp_reason = POWER_SUPPLY_CP_NONE;
+ rc = smblib_select_sec_charger(chg,
+ chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL :
+ POWER_SUPPLY_CHARGER_SEC_NONE);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't disable secondary charger rc=%d\n",
+ rc);
+ mutex_unlock(&chg->smb_lock);
+
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
+ /* reset both usbin current and voltage votes */
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+
+ /* Remove SW thermal regulation WA votes */
+ vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ if (chg->cp_disable_votable)
+ vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER,
+ false, 0);
+
+ /* reconfigure allowed voltage for HVDCP */
+ rc = smblib_set_adapter_allowance(chg,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+ rc);
+
+ chg->voltage_min_uv = MICRO_5V;
+ chg->voltage_max_uv = MICRO_5V;
+ chg->usb_icl_delta_ua = 0;
+ chg->pulse_cnt = 0;
+ chg->uusb_apsd_rerun_done = false;
+
+ /* write back the default FLOAT charger configuration */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ (u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+ rc);
+
+ /* clear USB ICL vote for USB_PSY_VOTER */
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+
+ /* clear USB ICL vote for DCP_VOTER */
+ rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+
+ /*
+ * if non-compliant charger caused UV, restore original max pulses
+ * and turn SUSPEND_ON_COLLAPSE_USBIN_BIT back on.
+ */
+ if (chg->qc2_unsupported_voltage) {
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ chg->qc2_max_pulses);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore max pulses rc=%d\n",
+ rc);
+
+ rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+ SUSPEND_ON_COLLAPSE_USBIN_BIT,
+ SUSPEND_ON_COLLAPSE_USBIN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't turn on SUSPEND_ON_COLLAPSE_USBIN_BIT rc=%d\n",
+ rc);
+
+ chg->qc2_unsupported_voltage = QC2_COMPLIANT;
+ }
+}
+
+void smblib_suspend_on_debug_battery(struct smb_charger *chg)
+{
+ int rc;
+ union power_supply_propval val;
+
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_DEBUG_BATTERY, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get debug battery prop rc=%d\n", rc);
+ return;
+ }
+ if (chg->suspend_input_on_debug_batt) {
+ vote(chg->usb_icl_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+ vote(chg->dc_suspend_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+ if (val.intval)
+ pr_info("Input suspended: Fake battery\n");
+ } else {
+ vote(chg->chg_disable_votable, DEBUG_BOARD_VOTER,
+ val.intval, 0);
+ }
+}
+
+int smblib_rerun_apsd_if_required(struct smb_charger *chg)
+{
+ union power_supply_propval val;
+ int rc;
+
+ rc = smblib_get_prop_usb_present(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb present rc = %d\n", rc);
+ return rc;
+ }
+
+ if (!val.intval)
+ return 0;
+
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
+
+ chg->uusb_apsd_rerun_done = true;
+ smblib_rerun_apsd(chg);
+
+ return 0;
+}
+
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+ *count = chg->pulse_cnt;
+ return 0;
+}
+
+#define USBIN_25MA 25000
+#define USBIN_100MA 100000
+#define USBIN_150MA 150000
+#define USBIN_500MA 500000
+#define USBIN_900MA 900000
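+/*
+ * SDP input current is selected with two bits: CFG_USB3P0_SEL_BIT picks the
+ * USB3 unit-load values (150/900mA) over USB2 (100/500mA), and
+ * USB51_MODE_BIT picks the higher value of the selected pair.
+ */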
+static int set_sdp_current(struct smb_charger *chg, int icl_ua)
+{
+ int rc;
+ u8 icl_options;
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+ /* power source is SDP */
+ switch (icl_ua) {
+ case USBIN_100MA:
+ /* USB 2.0 100mA */
+ icl_options = 0;
+ break;
+ case USBIN_150MA:
+ /* USB 3.0 150mA */
+ icl_options = CFG_USB3P0_SEL_BIT;
+ break;
+ case USBIN_500MA:
+ /* USB 2.0 500mA */
+ icl_options = USB51_MODE_BIT;
+ break;
+ case USBIN_900MA:
+ /* USB 3.0 900mA */
+ icl_options = CFG_USB3P0_SEL_BIT | USB51_MODE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+ apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ /*
+ * change the float charger configuration to SDP, if this
+ * is the case of SDP being detected as FLOAT
+ */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+ CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int get_sdp_current(struct smb_charger *chg, int *icl_ua)
+{
+ int rc;
+ u8 icl_options;
+ bool usb3 = false;
+
+ rc = smblib_read(chg, USBIN_ICL_OPTIONS_REG, &icl_options);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL options rc=%d\n", rc);
+ return rc;
+ }
+
+ usb3 = (icl_options & CFG_USB3P0_SEL_BIT);
+
+ if (icl_options & USB51_MODE_BIT)
+ *icl_ua = usb3 ? USBIN_900MA : USBIN_500MA;
+ else
+ *icl_ua = usb3 ? USBIN_150MA : USBIN_100MA;
+
+ return rc;
+}
+
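+/*
+ * ICL configuration flow: requests of 25mA or less suspend USB input; SDP
+ * attachments use the USB 2.0/3.0 ICL options; everything else programs the
+ * high-current ICL register, with the override bit set for Type-C connectors.
+ */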
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
+{
+ int rc = 0;
+ bool hc_mode = false, override = false;
+ /* suspend if 25mA or less is requested */
+ bool suspend = (icl_ua <= USBIN_25MA);
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC) {
+ rc = smblib_masked_write(chg, USB_CMD_PULLDOWN_REG,
+ EN_PULLDOWN_USB_IN_BIT,
+ suspend ? 0 : EN_PULLDOWN_USB_IN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write %s to EN_PULLDOWN_USB_IN_BIT rc=%d\n",
+ suspend ? "disable" : "enable", rc);
+ goto out;
+ }
+ }
+
+ if (suspend)
+ return smblib_set_usb_suspend(chg, true);
+
+ if (icl_ua == INT_MAX)
+ goto set_mode;
+
+ /* configure current */
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB
+ && (chg->typec_legacy
+ || chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+ || chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)) {
+ rc = set_sdp_current(chg, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ /*
+ * Try USB 2.0/3.0 option first on USB path when maximum input
+ * current limit is 500mA or below for better accuracy; in case
+ * of error, proceed to use USB high-current mode.
+ */
+ if (icl_ua <= USBIN_500MA) {
+ rc = set_sdp_current(chg, icl_ua);
+ if (rc >= 0)
+ goto out;
+ }
+
+ rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
+ goto out;
+ }
+ hc_mode = true;
+
+ /*
+ * Micro-USB mode follows the ICL register independent of the
+ * override bit; configure the override only for Type-C mode.
+ */
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
+ override = true;
+ }
+
+set_mode:
+ rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+ USBIN_MODE_CHG_BIT, hc_mode ? USBIN_MODE_CHG_BIT : 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = smblib_icl_override(chg, override);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+ goto out;
+ }
+
+ /* unsuspend after configuring current and override */
+ rc = smblib_set_usb_suspend(chg, false);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't resume input rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Re-run AICL */
+ if (chg->real_charger_type != POWER_SUPPLY_TYPE_USB)
+ rc = smblib_rerun_aicl(chg);
+out:
+ return rc;
+}
+
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
+{
+ int rc = 0;
+ u8 load_cfg;
+ bool override;
+
+ if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+ || chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ && (chg->usb_psy->desc->type == POWER_SUPPLY_TYPE_USB)) {
+ rc = get_sdp_current(chg, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get SDP ICL rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = smblib_read(chg, USBIN_LOAD_CFG_REG, &load_cfg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get load cfg rc=%d\n", rc);
+ return rc;
+ }
+ override = load_cfg & ICL_OVERRIDE_AFTER_APSD_BIT;
+ if (!override)
+ return INT_MAX;
+
+ /* override is set */
+ rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat,
+ icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
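+/*
+ * Briefly deselect and reselect the charge pump to toggle the SMB_EN pin;
+ * this is a no-op unless CP is the currently selected secondary charger.
+ */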
+int smblib_toggle_smb_en(struct smb_charger *chg, int toggle)
+{
+ int rc = 0;
+
+ if (!toggle)
+ return rc;
+
+ mutex_lock(&chg->smb_lock);
+
+ if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
+ /* Pull down SMB_EN pin */
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_NONE);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable SMB_EN pin rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /*
+ * A minimum of 20us delay is expected before switching on STAT
+ * pin.
+ */
+ usleep_range(20, 30);
+
+ /* Pull up SMB_EN pin and enable Charge Pump under HW control */
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_CP);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable CP rc=%d\n",
+ rc);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&chg->smb_lock);
+
+ return rc;
+}
+
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+ int suspend, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (chg->smb_version == PMI632_SUBTYPE)
+ return 0;
+
+ /* resume input if suspend is invalid */
+ if (suspend < 0)
+ suspend = 0;
+
+ return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
+static int smblib_awake_vote_callback(struct votable *votable, void *data,
+ int awake, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (awake)
+ pm_stay_awake(chg->dev);
+ else
+ pm_relax(chg->dev);
+
+ return 0;
+}
+
+static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
+ int chg_disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT,
+ chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't %s charging rc=%d\n",
+ chg_disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
+ void *data, int enable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (!chg->irq_info[INPUT_CURRENT_LIMITING_IRQ].irq ||
+ !chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chg->irq_info[INPUT_CURRENT_LIMITING_IRQ].irq);
+ enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+ } else {
+ disable_irq_nosync(
+ chg->irq_info[INPUT_CURRENT_LIMITING_IRQ].irq);
+ disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+ }
+
+ return 0;
+}
+
+static int smblib_wdog_snarl_irq_en_vote_callback(struct votable *votable,
+ void *data, int enable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (!chg->irq_info[WDOG_SNARL_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chg->irq_info[WDOG_SNARL_IRQ].irq);
+ enable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq);
+ } else {
+ disable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq);
+ disable_irq_nosync(chg->irq_info[WDOG_SNARL_IRQ].irq);
+ }
+
+ return 0;
+}
+
+/*******************
+ * VCONN REGULATOR *
+ * *****************/
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u8 stat, orientation;
+
+ smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* VCONN orientation is opposite to that of CC */
+ orientation =
+ stat & TYPEC_CCOUT_VALUE_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+ rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+ VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
+ VCONN_EN_VALUE_BIT | orientation);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ smblib_dbg(chg, PR_OTG, "disabling VCONN\n");
+ rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG,
+ VCONN_EN_VALUE_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable vconn regulator rc=%d\n", rc);
+
+ return 0;
+}
+
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+ u8 cmd;
+
+ rc = smblib_read(chg, TYPE_C_VCONN_CONTROL_REG, &cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return (cmd & VCONN_EN_VALUE_BIT) ? 1 : 0;
+}
+
+/*****************
+ * OTG REGULATOR *
+ *****************/
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+
+ rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG, OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ smblib_dbg(chg, PR_OTG, "disabling OTG\n");
+
+ rc = smblib_masked_write(chg, DCDC_CMD_OTG_REG, OTG_EN_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u8 cmd;
+
+ rc = smblib_read(chg, DCDC_CMD_OTG_REG, &cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read CMD_OTG rc=%d", rc);
+ return rc;
+ }
+
+ return (cmd & OTG_EN_BIT) ? 1 : 0;
+}
+
+/********************
+ * BATT PSY GETTERS *
+ ********************/
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval
+ = (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0)
+ && get_client_vote(chg->dc_suspend_votable, USER_VOTER);
+ return 0;
+}
+
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATIF_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATIF_INT_RT_STS rc=%d\n", rc);
+ return rc;
+ }
+
+ val->intval = !(stat & (BAT_THERM_OR_ID_MISSING_RT_STS_BIT
+ | BAT_TERMINAL_MISSING_RT_STS_BIT));
+
+ return rc;
+}
+
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = -EINVAL;
+
+ if (chg->fake_capacity >= 0) {
+ val->intval = chg->fake_capacity;
+ return 0;
+ }
+
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, val);
+
+ return rc;
+}
+
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ union power_supply_propval pval = {0, };
+ bool usb_online, dc_online;
+ u8 stat;
+ int rc;
+
+ rc = smblib_get_prop_usb_online(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb online property rc=%d\n",
+ rc);
+ return rc;
+ }
+ usb_online = (bool)pval.intval;
+
+ rc = smblib_get_prop_dc_online(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get dc online property rc=%d\n",
+ rc);
+ return rc;
+ }
+ dc_online = (bool)pval.intval;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+ if (!usb_online && !dc_online) {
+ switch (stat) {
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ }
+ return rc;
+ }
+
+ switch (stat) {
+ case TRICKLE_CHARGE:
+ case PRE_CHARGE:
+ case FULLON_CHARGE:
+ case TAPER_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case DISABLE_CHARGE:
+ case PAUSE_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
+
+ if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+ return 0;
+
+ if (!usb_online && dc_online
+ && chg->fake_batt_status == POWER_SUPPLY_STATUS_FULL) {
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ return 0;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
+ ENABLE_FULLON_MODE_BIT;
+
+ if (!stat)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return 0;
+}
+
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (stat & BATTERY_CHARGER_STATUS_MASK) {
+ case TRICKLE_CHARGE:
+ case PRE_CHARGE:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case FULLON_CHARGE:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case TAPER_CHARGE:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ }
+
+ return rc;
+}
+
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ union power_supply_propval pval;
+ int rc;
+ int effective_fv_uv;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_2 = 0x%02x\n",
+ stat);
+
+ if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
+ if (!rc) {
+ /*
+ * If Vbatt is within 40mV above Vfloat, then don't
+ * treat it as overvoltage.
+ */
+ effective_fv_uv = get_effective_result(chg->fv_votable);
+ if (pval.intval >= effective_fv_uv + 40000) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+ pval.intval, effective_fv_uv);
+ goto done;
+ }
+ }
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else if (stat & BAT_TEMP_STATUS_TOO_HOT_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (stat & BAT_TEMP_STATUS_COLD_SOFT_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_COOL;
+ else if (stat & BAT_TEMP_STATUS_HOT_SOFT_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_WARM;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+done:
+ return rc;
+}
+
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->system_temp_level;
+ return 0;
+}
+
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->thermal_levels;
+ return 0;
+}
+
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ u8 stat;
+ int rc;
+
+ if (chg->fake_input_current_limited >= 0) {
+ val->intval = chg->fake_input_current_limited;
+ return 0;
+ }
+
+ rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
+ return rc;
+ }
+ val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
+ return 0;
+}
+
+int smblib_get_prop_batt_iterm(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc, temp;
+ u8 stat, buf[2];
+
+ /*
+ * Currently, only ADC comparator-based termination is supported,
+ * hence read only the threshold corresponding to ADC source.
+ * Proceed only if CHGR_ITERM_USE_ANALOG_BIT is 0.
+ */
+ rc = smblib_read(chg, CHGR_ENG_CHARGING_CFG_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read CHGR_ENG_CHARGING_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (stat & CHGR_ITERM_USE_ANALOG_BIT) {
+ val->intval = -EINVAL;
+ return 0;
+ }
+
+ rc = smblib_batch_read(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, buf, 2);
+
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read CHGR_ADC_ITERM_UP_THD_MSB_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ temp = buf[1] | (buf[0] << 8);
+ temp = sign_extend32(temp, 15);
+ temp = DIV_ROUND_CLOSEST(temp * 10000, ADC_CHG_TERM_MASK);
+ val->intval = temp;
+
+ return rc;
+}
+
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ val->intval = (stat == TERMINATE_CHARGE);
+ return 0;
+}
+
+/***********************
+ * BATTERY PSY SETTERS *
+ ***********************/
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ /* vote 0mA when suspended */
+ rc = vote(chg->usb_icl_votable, USER_VOTER, (bool)val->intval, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't vote to %s USB rc=%d\n",
+ (bool)val->intval ? "suspend" : "resume", rc);
+ return rc;
+ }
+
+ rc = vote(chg->dc_suspend_votable, USER_VOTER, (bool)val->intval, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+ (bool)val->intval ? "suspend" : "resume", rc);
+ return rc;
+ }
+
+ power_supply_changed(chg->batt_psy);
+ return rc;
+}
+
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ chg->fake_capacity = val->intval;
+
+ power_supply_changed(chg->batt_psy);
+
+ return 0;
+}
+
+int smblib_set_prop_batt_status(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ /* Faking battery full */
+ if (val->intval == POWER_SUPPLY_STATUS_FULL)
+ chg->fake_batt_status = val->intval;
+ else
+ chg->fake_batt_status = -EINVAL;
+
+ power_supply_changed(chg->batt_psy);
+
+ return 0;
+}
+
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ if (val->intval < 0)
+ return -EINVAL;
+
+ if (chg->thermal_levels <= 0)
+ return -EINVAL;
+
+ if (val->intval > chg->thermal_levels)
+ return -EINVAL;
+
+ chg->system_temp_level = val->intval;
+
+ if (chg->system_temp_level == chg->thermal_levels)
+ return vote(chg->chg_disable_votable,
+ THERMAL_DAEMON_VOTER, true, 0);
+
+ vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
+ if (chg->system_temp_level == 0)
+ return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
+
+ vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
+ chg->thermal_mitigation[chg->system_temp_level]);
+ return 0;
+}
+
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ chg->fake_input_current_limited = val->intval;
+ return 0;
+}
+
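+/* The auto-recharge SOC threshold register is 8-bit: scale percent to 0-255 */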
+int smblib_set_prop_rechg_soc_thresh(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+ u8 new_thr = DIV_ROUND_CLOSEST(val->intval * 255, 100);
+
+ rc = smblib_write(chg, CHARGE_RCHG_SOC_THRESHOLD_CFG_REG,
+ new_thr);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to RCHG_SOC_THRESHOLD_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg->auto_recharge_soc = val->intval;
+
+ return rc;
+}
+
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* USB is suspended so skip re-running AICL */
+ if (stat & USBIN_SUSPEND_STS_BIT)
+ return rc;
+
+ smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+
+ rc = smblib_masked_write(chg, AICL_CMD_REG, RERUN_AICL_BIT,
+ RERUN_AICL_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to AICL_CMD_REG rc=%d\n",
+ rc);
+ return 0;
+}
+
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 increment */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+ SINGLE_INCREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 decrement */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+ SINGLE_DECREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static void smblib_hvdcp_set_fsw(struct smb_charger *chg, int bit)
+{
+ switch (bit) {
+ case QC_5V_BIT:
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_5V);
+ break;
+ case QC_9V_BIT:
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_9V);
+ break;
+ case QC_12V_BIT:
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_12V);
+ break;
+ default:
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_removal);
+ break;
+ }
+}
+
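+/*
+ * QC3.0 raises VBUS in 200mV steps from 5V, so these pulse counts mark the
+ * approximate 6V/9V/12V crossings used to pick the switching frequency.
+ */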
+#define QC3_PULSES_FOR_6V 5
+#define QC3_PULSES_FOR_9V 20
+#define QC3_PULSES_FOR_12V 35
+static int smblib_hvdcp3_set_fsw(struct smb_charger *chg)
+{
+ int pulse_count, rc;
+
+ rc = smblib_get_pulse_cnt(chg, &pulse_count);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+ return rc;
+ }
+
+ if (pulse_count < QC3_PULSES_FOR_6V)
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_5V);
+ else if (pulse_count < QC3_PULSES_FOR_9V)
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_6V_8V);
+ else if (pulse_count < QC3_PULSES_FOR_12V)
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_9V);
+ else
+ smblib_set_opt_switcher_freq(chg,
+ chg->chg_freq.freq_12V);
+
+ return 0;
+}
+
+static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read QC_CHANGE_STATUS rc=%d\n", rc);
+ return;
+ }
+
+ smblib_hvdcp_set_fsw(chg, stat & QC_2P0_STATUS_MASK);
+ }
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+ rc = smblib_hvdcp3_set_fsw(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set QC3.0 Fsw rc=%d\n", rc);
+ }
+
+ power_supply_changed(chg->usb_main_psy);
+}
+
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+ int target_icl_ua, rc = 0;
+ union power_supply_propval pval;
+ u8 stat;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ /*
+ * Pre-emptively increment pulse count to enable the setting
+ * of FSW prior to increasing voltage.
+ */
+ chg->pulse_cnt++;
+
+ rc = smblib_hvdcp3_set_fsw(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set QC3.0 Fsw rc=%d\n", rc);
+
+ rc = smblib_dp_pulse(chg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't increase pulse count rc=%d\n",
+ rc);
+ /*
+ * Increment pulse count failed;
+ * reset to former value.
+ */
+ chg->pulse_cnt--;
+ }
+
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ rc = smblib_dm_pulse(chg);
+ if (!rc && chg->pulse_cnt)
+ chg->pulse_cnt--;
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ target_icl_ua = get_effective_result(chg->usb_icl_votable);
+ if (target_icl_ua < 0) {
+ /* no client vote, get the ICL from charger */
+ rc = power_supply_get_property(chg->usb_psy,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get max curr rc=%d\n",
+ rc);
+ return rc;
+ }
+ target_icl_ua = pval.intval;
+ }
+
+ /*
+ * Check if any other voter voted on USB_ICL; if a voter other
+ * than SW_QC3_VOTER changed it, reset the delta and restart
+ * the reduction.
+ */
+ if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+ SW_QC3_VOTER))
+ chg->usb_icl_delta_ua = 0;
+
+ chg->usb_icl_delta_ua += 100000;
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+ target_icl_ua - 100000);
+ smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+ target_icl_ua, chg->usb_icl_delta_ua);
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_5V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_9V:
+ if (chg->qc2_unsupported_voltage == QC2_NON_COMPLIANT_9V) {
+ smblib_err(chg, "Couldn't set 9V: unsupported\n");
+ return -EINVAL;
+ }
+
+ /* If we are increasing voltage to get to 9V, set FSW first */
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n",
+ rc);
+ break;
+ }
+
+ if (stat & QC_5V_BIT)
+ smblib_hvdcp_set_fsw(chg, QC_9V_BIT);
+
+ rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 9V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_12V:
+ if (chg->qc2_unsupported_voltage == QC2_NON_COMPLIANT_12V) {
+ smblib_err(chg, "Couldn't set 12V: unsupported\n");
+ return -EINVAL;
+ }
+
+ /* If we are increasing voltage to get to 12V, set FSW first */
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n",
+ rc);
+ break;
+ }
+
+ if ((stat & QC_9V_BIT) || (stat & QC_5V_BIT))
+ smblib_hvdcp_set_fsw(chg, QC_12V_BIT);
+
+ rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 12V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable)
+{
+ int rc;
+ u8 mask;
+
+ /*
+ * Disable h/w based JEITA compensation when s/w JEITA is enabled
+ */
+ mask = JEITA_EN_COLD_SL_FCV_BIT
+ | JEITA_EN_HOT_SL_FCV_BIT
+ | JEITA_EN_HOT_SL_CCC_BIT
+ | JEITA_EN_COLD_SL_CCC_BIT;
+ rc = smblib_masked_write(chg, JEITA_EN_CFG_REG, mask,
+ disable ? 0 : mask);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure s/w jeita rc=%d\n",
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int smblib_set_sw_thermal_regulation(struct smb_charger *chg,
+ bool enable)
+{
+ int rc = 0;
+
+ if (!(chg->wa_flags & SW_THERM_REGULATION_WA))
+ return rc;
+
+ if (enable) {
+ /*
+ * Configure the minimum snarl watchdog timeout so a thermal
+ * condition is addressed quickly.
+ */
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ SNARL_WDOG_TIMEOUT_MASK, SNARL_WDOG_TMOUT_62P5MS);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure snarl wdog tmout, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER,
+ true, 0);
+ /*
+ * Schedule SW_THERM_REGULATION_WORK directly if USB input
+ * is suspended due to SW thermal regulation WA since WDOG
+ * IRQ won't trigger with input suspended.
+ */
+ if (is_client_vote_enabled(chg->usb_icl_votable,
+ SW_THERM_REGULATION_VOTER)) {
+ vote(chg->awake_votable, SW_THERM_REGULATION_VOTER,
+ true, 0);
+ schedule_delayed_work(&chg->thermal_regulation_work, 0);
+ }
+ } else {
+ vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER,
+ false, 0);
+ cancel_delayed_work_sync(&chg->thermal_regulation_work);
+ vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ }
+
+ smblib_dbg(chg, PR_MISC, "WDOG SNARL INT %s\n",
+ enable ? "Enabled" : "Disabled");
+
+ return rc;
+}
+
+static int smblib_update_thermal_readings(struct smb_charger *chg)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0;
+
+ if (!chg->pl.psy)
+ chg->pl.psy = power_supply_get_by_name("parallel");
+
+ rc = smblib_read_iio_channel(chg, chg->iio.die_temp_chan,
+ DIV_FACTOR_DECIDEGC, &chg->die_temp);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read DIE TEMP channel, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_read_iio_channel(chg, chg->iio.connector_temp_chan,
+ DIV_FACTOR_DECIDEGC, &chg->connector_temp);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read CONN TEMP channel, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_read_iio_channel(chg, chg->iio.skin_temp_chan,
+ DIV_FACTOR_DECIDEGC, &chg->skin_temp);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read SKIN TEMP channel, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
+ rc = smblib_read_iio_channel(chg, chg->iio.smb_temp_chan,
+ DIV_FACTOR_DECIDEGC, &chg->smb_temp);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read SMB TEMP channel, rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else if (chg->pl.psy && chg->sec_chg_selected ==
+ POWER_SUPPLY_CHARGER_SEC_PL) {
+ rc = power_supply_get_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_CHARGER_TEMP, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get smb charger temp, rc=%d\n",
+ rc);
+ return rc;
+ }
+ chg->smb_temp = pval.intval;
+ } else {
+ chg->smb_temp = -ENODATA;
+ }
+
+ return rc;
+}
+
+/* SW thermal regulation thresholds in deciDegC */
+#define DIE_TEMP_RST_THRESH 1000
+#define DIE_TEMP_REG_H_THRESH 800
+#define DIE_TEMP_REG_L_THRESH 600
+
+#define CONNECTOR_TEMP_SHDN_THRESH 700
+#define CONNECTOR_TEMP_RST_THRESH 600
+#define CONNECTOR_TEMP_REG_H_THRESH 550
+#define CONNECTOR_TEMP_REG_L_THRESH 500
+
+#define SMB_TEMP_SHDN_THRESH 1400
+#define SMB_TEMP_RST_THRESH 900
+#define SMB_TEMP_REG_H_THRESH 800
+#define SMB_TEMP_REG_L_THRESH 600
+
+#define SKIN_TEMP_SHDN_THRESH 700
+#define SKIN_TEMP_RST_THRESH 600
+#define SKIN_TEMP_REG_H_THRESH 550
+#define SKIN_TEMP_REG_L_THRESH 500
+
+#define THERM_REG_RECHECK_DELAY_1S 1000 /* 1 sec */
+#define THERM_REG_RECHECK_DELAY_8S 8000 /* 8 sec */
+static int smblib_process_thermal_readings(struct smb_charger *chg)
+{
+ int rc = 0, wdog_timeout = SNARL_WDOG_TMOUT_8S;
+ u32 thermal_status = TEMP_BELOW_RANGE;
+ bool suspend_input = false, disable_smb = false;
+
+ /*
+ * Following is the SW thermal regulation flow:
+ *
+ * TEMP_SHUT_DOWN_LEVEL: If either connector temp or skin temp
+ * exceeds their respective SHDN threshold. Need to suspend input
+ * and secondary charger.
+ *
+ * TEMP_SHUT_DOWN_SMB_LEVEL: If smb temp exceeds its SHDN threshold
+ * but connector and skin temp are below theirs. Need to suspend SMB.
+ *
+ * TEMP_ALERT_LEVEL: If die, connector, smb or skin temp exceeds its
+ * respective RST threshold. Stay put and monitor temperature closely.
+ *
+ * TEMP_ABOVE_RANGE or TEMP_WITHIN_RANGE or TEMP_BELOW_RANGE: If die,
+ * connector, smb or skin temp exceeds its respective REG_H or REG_L
+ * threshold. Unsuspend input and SMB.
+ */
+ if (chg->connector_temp > CONNECTOR_TEMP_SHDN_THRESH ||
+ chg->skin_temp > SKIN_TEMP_SHDN_THRESH) {
+ thermal_status = TEMP_SHUT_DOWN;
+ wdog_timeout = SNARL_WDOG_TMOUT_1S;
+ suspend_input = true;
+ disable_smb = true;
+ goto out;
+ }
+
+ if (chg->smb_temp > SMB_TEMP_SHDN_THRESH) {
+ thermal_status = TEMP_SHUT_DOWN_SMB;
+ wdog_timeout = SNARL_WDOG_TMOUT_1S;
+ disable_smb = true;
+ goto out;
+ }
+
+ if (chg->connector_temp > CONNECTOR_TEMP_RST_THRESH ||
+ chg->skin_temp > SKIN_TEMP_RST_THRESH ||
+ chg->smb_temp > SMB_TEMP_RST_THRESH ||
+ chg->die_temp > DIE_TEMP_RST_THRESH) {
+ thermal_status = TEMP_ALERT_LEVEL;
+ wdog_timeout = SNARL_WDOG_TMOUT_1S;
+ goto out;
+ }
+
+ if (chg->connector_temp > CONNECTOR_TEMP_REG_H_THRESH ||
+ chg->skin_temp > SKIN_TEMP_REG_H_THRESH ||
+ chg->smb_temp > SMB_TEMP_REG_H_THRESH ||
+ chg->die_temp > DIE_TEMP_REG_H_THRESH) {
+ thermal_status = TEMP_ABOVE_RANGE;
+ wdog_timeout = SNARL_WDOG_TMOUT_1S;
+ goto out;
+ }
+
+ if (chg->connector_temp > CONNECTOR_TEMP_REG_L_THRESH ||
+ chg->skin_temp > SKIN_TEMP_REG_L_THRESH ||
+ chg->smb_temp > SMB_TEMP_REG_L_THRESH ||
+ chg->die_temp > DIE_TEMP_REG_L_THRESH) {
+ thermal_status = TEMP_WITHIN_RANGE;
+ wdog_timeout = SNARL_WDOG_TMOUT_8S;
+ }
+out:
+ smblib_dbg(chg, PR_MISC, "Current temperatures: \tDIE_TEMP: %d,\tCONN_TEMP: %d,\tSMB_TEMP: %d,\tSKIN_TEMP: %d\nTHERMAL_STATUS: %d\n",
+ chg->die_temp, chg->connector_temp, chg->smb_temp,
+ chg->skin_temp, thermal_status);
+
+ if (thermal_status != chg->thermal_status) {
+ chg->thermal_status = thermal_status;
+ /*
+ * If thermal level changes to TEMP ALERT LEVEL, don't
+ * enable/disable main/parallel charging.
+ */
+ if (chg->thermal_status == TEMP_ALERT_LEVEL)
+ goto exit;
+
+ /* Enable/disable SMB_EN pin */
+ rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+ SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT,
+ (disable_smb ? SMB_EN_OVERRIDE_BIT :
+ (SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT)));
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set SMB_EN, rc=%d\n", rc);
+
+ /*
+ * Enable/disable secondary charger through votables to ensure
+ * that if the SMB_EN pin gets toggled somehow, the secondary charger
+ * remains enabled/disabled according to SW thermal regulation.
+ */
+ if (!chg->cp_disable_votable)
+ chg->cp_disable_votable = find_votable("CP_DISABLE");
+ if (chg->cp_disable_votable)
+ vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER,
+ disable_smb, 0);
+
+ vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER,
+ disable_smb, 0);
+ smblib_dbg(chg, PR_MISC, "Parallel %s as per SW thermal regulation\n",
+ disable_smb ? "disabled" : "enabled");
+
+ /*
+ * If thermal level changes to TEMP_SHUT_DOWN_SMB, don't
+ * enable/disable main charger.
+ */
+ if (chg->thermal_status == TEMP_SHUT_DOWN_SMB)
+ goto exit;
+
+ /* Suspend input if SHDN threshold reached */
+ vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER,
+ suspend_input, 0);
+ vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER,
+ suspend_input, 0);
+ smblib_dbg(chg, PR_MISC, "USB/DC %s as per SW thermal regulation\n",
+ suspend_input ? "suspended" : "unsuspended");
+ }
+exit:
+ /*
+ * On USB suspend, WDOG IRQ stops triggering. To continue thermal
+ * monitoring and regulation until USB is plugged out, reschedule
+ * the SW thermal regulation work without releasing the wake lock.
+ */
+ if (is_client_vote_enabled(chg->usb_icl_votable,
+ SW_THERM_REGULATION_VOTER)) {
+ schedule_delayed_work(&chg->thermal_regulation_work,
+ msecs_to_jiffies(THERM_REG_RECHECK_DELAY_1S));
+ return 0;
+ }
+
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ SNARL_WDOG_TIMEOUT_MASK, wdog_timeout);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set WD SNARL timer, rc=%d\n", rc);
+
+ vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ return rc;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
+ return rc;
+ }
+
+ val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+ return 0;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+ val->intval = false;
+ return rc;
+ }
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+ stat);
+
+ val->intval = (stat & USE_DCIN_BIT) &&
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+
+ return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ return smblib_get_charge_param(chg, &chg->param.dc_icl, &val->intval);
+}
+
+int smblib_get_prop_dc_voltage_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = MICRO_12V;
+ return 0;
+}
+
+int smblib_get_prop_dc_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->wls_psy) {
+ chg->wls_psy = power_supply_get_by_name("wireless");
+ if (!chg->wls_psy)
+ return -ENODEV;
+ }
+
+ rc = power_supply_get_property(chg->wls_psy,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ val);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't get POWER_SUPPLY_PROP_VOLTAGE_REGULATION, rc=%d\n",
+ rc);
+ return rc;
+}
+
+/*******************
+ * DC PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ return smblib_set_charge_param(chg, &chg->param.dc_icl, val->intval);
+}
+
+int smblib_set_prop_voltage_wls_output(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->wls_psy) {
+ chg->wls_psy = power_supply_get_by_name("wireless");
+ if (!chg->wls_psy)
+ return -ENODEV;
+ }
+
+ rc = power_supply_set_property(chg->wls_psy,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ val);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_VOLTAGE_REGULATION, rc=%d\n",
+ rc);
+
+ smblib_dbg(chg, PR_WLS, "Set WLS output voltage %d\n", val->intval);
+
+ return rc;
+}
+
+/*******************
+ * USB PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USBIN_RT_STS rc=%d\n", rc);
+ return rc;
+ }
+
+ val->intval = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+ return 0;
+}
+
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) {
+ val->intval = false;
+ return rc;
+ }
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+ stat);
+
+ val->intval = (stat & USE_USBIN_BIT) &&
+ (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+ return rc;
+}
+
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ switch (chg->real_charger_type) {
+ case POWER_SUPPLY_TYPE_USB_HVDCP:
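+		/*
+		 * Cap the reported maximum for QC2 adapters that were flagged
+		 * non-compliant (see usbin_uv_irq_handler) at the highest
+		 * voltage they were seen to support.
+		 */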
+ if (chg->qc2_unsupported_voltage == QC2_NON_COMPLIANT_9V) {
+ val->intval = MICRO_5V;
+ break;
+ } else if (chg->qc2_unsupported_voltage ==
+ QC2_NON_COMPLIANT_12V) {
+ val->intval = MICRO_9V;
+ break;
+ }
+ /* else, fallthrough */
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+ case POWER_SUPPLY_TYPE_USB_PD:
+ if (chg->smb_version == PMI632_SUBTYPE)
+ val->intval = MICRO_9V;
+ else
+ val->intval = MICRO_12V;
+ break;
+ default:
+ val->intval = MICRO_5V;
+ break;
+ }
+
+ return 0;
+}
+
+static int smblib_estimate_hvdcp_voltage(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (stat & QC_5V_BIT)
+ val->intval = MICRO_5V;
+ else if (stat & QC_9V_BIT)
+ val->intval = MICRO_9V;
+ else if (stat & QC_12V_BIT)
+ val->intval = MICRO_12V;
+
+ return 0;
+}
+
+#define HVDCP3_STEP_UV 200000
+static int smblib_estimate_adaptor_voltage(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ switch (chg->real_charger_type) {
+ case POWER_SUPPLY_TYPE_USB_HVDCP:
+ return smblib_estimate_hvdcp_voltage(chg, val);
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+ val->intval = MICRO_5V + (HVDCP3_STEP_UV * chg->pulse_cnt);
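+		/* e.g. pulse_cnt = 10 gives 5 V + 10 * 0.2 V = 7 V (values in uV) */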
+ break;
+ case POWER_SUPPLY_TYPE_USB_PD:
+ /* Take the average of min and max values */
+ val->intval = chg->voltage_min_uv +
+ ((chg->voltage_max_uv - chg->voltage_min_uv) / 2);
+ break;
+ default:
+ val->intval = MICRO_5V;
+ break;
+ }
+
+ return 0;
+}
+
+static int smblib_read_mid_voltage_chan(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->iio.mid_chan)
+ return -ENODATA;
+
+ rc = iio_read_channel_processed(chg->iio.mid_chan, &val->intval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read MID channel rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * If MID voltage < 1V, it is unreliable.
+ * Figure out voltage from registers and calculations.
+ */
+ if (val->intval < 1000000)
+ return smblib_estimate_adaptor_voltage(chg, val);
+
+ return 0;
+}
+
+static int smblib_read_usbin_voltage_chan(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->iio.usbin_v_chan)
+ return -ENODATA;
+
+ rc = iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USBIN channel rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb presence status rc=%d\n", rc);
+ return -ENODATA;
+ }
+
+ /* usb not present */
+ if (!pval.intval) {
+ val->intval = 0;
+ return 0;
+ }
+
+ /*
+ * For PM8150B, use MID_CHG ADC channel because overvoltage is observed
+ * to occur randomly in the USBIN channel, particularly at high
+ * voltages.
+ */
+ if (chg->smb_version == PM8150B_SUBTYPE)
+ return smblib_read_mid_voltage_chan(chg, val);
+ else
+ return smblib_read_usbin_voltage_chan(chg, val);
+}
+
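+/*
+ * Source a current onto SBU1 and then SBU2 and read the resulting voltage
+ * back over ADC; returns true if either reading falls below r_thr. Used by
+ * the liquid presence detection (LPD) path to flag moisture on the Type-C
+ * connector.
+ */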
+bool smblib_rsbux_low(struct smb_charger *chg, int r_thr)
+{
+ int r_sbu1, r_sbu2;
+ bool ret = false;
+ int rc;
+
+ if (!chg->iio.sbux_chan)
+ return false;
+
+ /* disable crude sensors */
+ rc = smblib_masked_write(chg, TYPE_C_CRUDE_SENSOR_CFG_REG,
+ EN_SRC_CRUDE_SENSOR_BIT | EN_SNK_CRUDE_SENSOR_BIT,
+ 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable crude sensor rc=%d\n", rc);
+ return false;
+ }
+
+ /* select SBU1 as current source */
+ rc = smblib_write(chg, TYPE_C_SBU_CFG_REG, SEL_SBU1_ISRC_VAL);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't select SBU1 rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.sbux_chan, &r_sbu1);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read SBU1 rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ if (r_sbu1 < r_thr) {
+ ret = true;
+ goto cleanup;
+ }
+
+ /* select SBU2 as current source */
+ rc = smblib_write(chg, TYPE_C_SBU_CFG_REG, SEL_SBU2_ISRC_VAL);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't select SBU1 rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.sbux_chan, &r_sbu2);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read SBU1 rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ if (r_sbu2 < r_thr)
+ ret = true;
+cleanup:
+ /* enable crude sensors */
+ rc = smblib_masked_write(chg, TYPE_C_CRUDE_SENSOR_CFG_REG,
+ EN_SRC_CRUDE_SENSOR_BIT | EN_SNK_CRUDE_SENSOR_BIT,
+ EN_SRC_CRUDE_SENSOR_BIT | EN_SNK_CRUDE_SENSOR_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable crude sensor rc=%d\n", rc);
+
+ /* disable current source */
+ rc = smblib_write(chg, TYPE_C_SBU_CFG_REG, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't select SBU1 rc=%d\n", rc);
+
+ return ret;
+}
+
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int temp, rc;
+ int input_present;
+
+ rc = smblib_is_input_present(chg, &input_present);
+ if (rc < 0)
+ return rc;
+
+ if (input_present == INPUT_NOT_PRESENT)
+ return -ENODATA;
+
+ if (chg->iio.temp_chan) {
+ rc = iio_read_channel_processed(chg->iio.temp_chan,
+ &temp);
+ if (rc < 0) {
+ pr_err("Error in reading temp channel, rc=%d\n", rc);
+ return rc;
+ }
+ val->intval = temp / 100;
+ } else {
+ return -ENODATA;
+ }
+
+ return rc;
+}
+
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
+
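+	/* report 1 or 2 for the detected CC orientation when attached, 0 otherwise */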
+ if (stat & CC_ATTACHED_BIT)
+ val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
+ else
+ val->intval = 0;
+
+ return rc;
+}
+
+static const char * const smblib_typec_mode_name[] = {
+ [POWER_SUPPLY_TYPEC_NONE] = "NONE",
+ [POWER_SUPPLY_TYPEC_SOURCE_DEFAULT] = "SOURCE_DEFAULT",
+ [POWER_SUPPLY_TYPEC_SOURCE_MEDIUM] = "SOURCE_MEDIUM",
+ [POWER_SUPPLY_TYPEC_SOURCE_HIGH] = "SOURCE_HIGH",
+ [POWER_SUPPLY_TYPEC_NON_COMPLIANT] = "NON_COMPLIANT",
+ [POWER_SUPPLY_TYPEC_SINK] = "SINK",
+ [POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE] = "SINK_POWERED_CABLE",
+ [POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY] = "SINK_DEBUG_ACCESSORY",
+ [POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER] = "SINK_AUDIO_ADAPTER",
+ [POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY] = "POWERED_CABLE_ONLY",
+};
+
+static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, TYPE_C_SNK_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
+ return POWER_SUPPLY_TYPEC_NONE;
+ }
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
+
+ switch (stat & DETECTED_SRC_TYPE_MASK) {
+ case SNK_RP_STD_BIT:
+ return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+ case SNK_RP_1P5_BIT:
+ return POWER_SUPPLY_TYPEC_SOURCE_MEDIUM;
+ case SNK_RP_3P0_BIT:
+ return POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+ case SNK_RP_SHORT_BIT:
+ return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+ default:
+ break;
+ }
+
+ return POWER_SUPPLY_TYPEC_NONE;
+}
+
+static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ if (chg->lpd_stage == LPD_STAGE_COMMIT)
+ return POWER_SUPPLY_TYPEC_NONE;
+
+ rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
+ rc);
+ return POWER_SUPPLY_TYPEC_NONE;
+ }
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_SRC_STATUS_REG = 0x%02x\n", stat);
+
+ switch (stat & DETECTED_SNK_TYPE_MASK) {
+ case AUDIO_ACCESS_RA_RA_BIT:
+ return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+ case SRC_DEBUG_ACCESS_BIT:
+ return POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY;
+ case SRC_RD_RA_VCONN_BIT:
+ return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
+ case SRC_RD_OPEN_BIT:
+ return POWER_SUPPLY_TYPEC_SINK;
+ default:
+ break;
+ }
+
+ return POWER_SUPPLY_TYPEC_NONE;
+}
+
+static int smblib_get_prop_typec_mode(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ return 0;
+ }
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_MISC_STATUS_REG = 0x%02x\n", stat);
+
+ if (stat & SNK_SRC_MODE_BIT)
+ return smblib_get_prop_dfp_mode(chg);
+ else
+ return smblib_get_prop_ufp_mode(chg);
+}
+
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 ctrl;
+
+ rc = smblib_read(chg, TYPE_C_MODE_CFG_REG, &ctrl);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MODE_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_MODE_CFG_REG = 0x%02x\n",
+ ctrl);
+
+ if (ctrl & TYPEC_DISABLE_CMD_BIT) {
+ val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+ return rc;
+ }
+
+ switch (ctrl & (EN_SRC_ONLY_BIT | EN_SNK_ONLY_BIT)) {
+ case 0:
+ val->intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ break;
+ case EN_SRC_ONLY_BIT:
+ val->intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ break;
+ case EN_SNK_ONLY_BIT:
+ val->intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+ smblib_err(chg, "unsupported power role 0x%02lx\n",
+ ctrl & (EN_SRC_ONLY_BIT | EN_SNK_ONLY_BIT));
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
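+/*
+ * True while we act as source: the attached partner reports one of the
+ * sink/accessory modes, which sit between NONE and SOURCE_DEFAULT in the
+ * Type-C mode enum.
+ */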
+static inline bool typec_in_src_mode(struct smb_charger *chg)
+{
+ return (chg->typec_mode > POWER_SUPPLY_TYPEC_NONE &&
+ chg->typec_mode < POWER_SUPPLY_TYPEC_SOURCE_DEFAULT);
+}
+
+int smblib_get_prop_typec_select_rp(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc, rp;
+ u8 stat;
+
+ if (!typec_in_src_mode(chg))
+ return -ENODATA;
+
+ rc = smblib_read(chg, TYPE_C_CURRSRC_CFG_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_CURRSRC_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (stat & TYPEC_SRC_RP_SEL_MASK) {
+ case TYPEC_SRC_RP_STD:
+ rp = POWER_SUPPLY_TYPEC_SRC_RP_STD;
+ break;
+ case TYPEC_SRC_RP_1P5A:
+ rp = POWER_SUPPLY_TYPEC_SRC_RP_1P5A;
+ break;
+ case TYPEC_SRC_RP_3A:
+ case TYPEC_SRC_RP_3A_DUPLICATE:
+ rp = POWER_SUPPLY_TYPEC_SRC_RP_3A;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val->intval = rp;
+
+ return 0;
+}
+
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+
+ if (chg->iio.usbin_i_chan) {
+ rc = iio_read_channel_processed(chg->iio.usbin_i_chan,
+ &val->intval);
+
+ /*
+ * For PM8150B, scaling factor = reciprocal of
+ * 0.2V/A in Buck mode, 0.4V/A in Boost mode.
+ */
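+		/* sink (UFP) runs the buck: x5 = 1/0.2; source (DFP) runs the boost: x2.5 = 1/0.4 */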
+ if (smblib_get_prop_ufp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
+ val->intval *= 5;
+ return rc;
+ }
+
+ if (smblib_get_prop_dfp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
+ val->intval = DIV_ROUND_CLOSEST(val->intval * 100, 40);
+ return rc;
+ }
+ } else {
+ rc = -ENODATA;
+ }
+
+ val->intval = 0;
+ return rc;
+}
+
+int smblib_get_prop_low_power(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val->intval = !(stat & SRC_HIGH_BATT_BIT);
+
+ return 0;
+}
+
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ return smblib_get_charge_param(chg, &chg->param.icl_stat, &val->intval);
+}
+
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc, pulses;
+
+ switch (chg->real_charger_type) {
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+ rc = smblib_get_pulse_cnt(chg, &pulses);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+ return 0;
+ }
+ val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
+ break;
+ case POWER_SUPPLY_TYPE_USB_PD:
+ val->intval = chg->voltage_min_uv;
+ break;
+ default:
+ val->intval = MICRO_5V;
+ break;
+ }
+
+ return 0;
+}
+
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->pd_hard_reset;
+ return 0;
+}
+
+int smblib_get_pe_start(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->ok_to_pd;
+ return 0;
+}
+
+int smblib_get_prop_die_health(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+ int input_present;
+
+ rc = smblib_is_input_present(chg, &input_present);
+ if (rc < 0)
+ return rc;
+
+ if (input_present == INPUT_NOT_PRESENT)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ if (chg->wa_flags & SW_THERM_REGULATION_WA) {
+ if (chg->die_temp == -ENODATA)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ if (chg->die_temp > DIE_TEMP_RST_THRESH)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (chg->die_temp > DIE_TEMP_REG_H_THRESH)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (chg->die_temp > DIE_TEMP_REG_L_THRESH)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+ }
+
+ rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read DIE_TEMP_STATUS_REG, rc=%d\n",
+ rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (stat & DIE_TEMP_RST_BIT)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (stat & DIE_TEMP_UB_BIT)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (stat & DIE_TEMP_LB_BIT)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+}
+
+int smblib_get_prop_connector_health(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+
+ if (chg->wa_flags & SW_THERM_REGULATION_WA) {
+ if (chg->connector_temp == -ENODATA)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ if (chg->connector_temp > CONNECTOR_TEMP_RST_THRESH)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (chg->connector_temp > CONNECTOR_TEMP_REG_H_THRESH)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (chg->connector_temp > CONNECTOR_TEMP_REG_L_THRESH)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+ }
+
+ rc = smblib_read(chg, CONNECTOR_TEMP_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read CONNECTOR_TEMP_STATUS_REG, rc=%d\n",
+ rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (stat & CONNECTOR_TEMP_RST_BIT)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (stat & CONNECTOR_TEMP_UB_BIT)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (stat & CONNECTOR_TEMP_LB_BIT)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+}
+
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+#define TYPEC_DEFAULT_CURRENT_UA 900000
+#define TYPEC_MEDIUM_CURRENT_UA 1500000
+#define TYPEC_HIGH_CURRENT_UA 3000000
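+/* Map the Type-C Rp advertisement to an input current limit for DCP-class chargers */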
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+ int rp_ua;
+
+ switch (typec_mode) {
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ rp_ua = TYPEC_HIGH_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ /* fall through */
+ default:
+ rp_ua = DCP_CURRENT_UA;
+ }
+
+ return rp_ua;
+}
+
+/*******************
+ * USB PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ if (chg->pd_active)
+ rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+ else
+ rc = -EPERM;
+
+ return rc;
+}
+
+static int smblib_handle_usb_current(struct smb_charger *chg,
+ int usb_current)
+{
+ int rc = 0, rp_ua, typec_mode;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (usb_current == -ETIMEDOUT) {
+ if ((chg->float_cfg & FLOAT_OPTIONS_MASK)
+ == FORCE_FLOAT_SDP_CFG_BIT) {
+ /*
+ * Configure USB500 mode if the float charger is
+ * configured for SDP mode.
+ */
+ rc = set_sdp_current(chg, USBIN_500MA);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't set SDP ICL rc=%d\n",
+ rc);
+
+ return rc;
+ }
+
+ if (chg->connector_type ==
+ POWER_SUPPLY_CONNECTOR_TYPEC) {
+ /*
+ * Valid FLOAT charger, report the current
+ * based on Rp.
+ */
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ rp_ua = get_rp_based_dcp_current(chg,
+ typec_mode);
+ rc = vote(chg->usb_icl_votable,
+ SW_ICL_MAX_VOTER, true, rp_ua);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = vote(chg->usb_icl_votable,
+ SW_ICL_MAX_VOTER, true, DCP_CURRENT_UA);
+ if (rc < 0)
+ return rc;
+ }
+ } else {
+ /*
+ * FLOAT charger detected as SDP by USB driver,
+ * charge with the requested current and update the
+ * real_charger_type
+ */
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ if (rc < 0)
+ return rc;
+ rc = vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+ false, 0);
+ if (rc < 0)
+ return rc;
+ }
+ } else {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ if (rc < 0) {
+ pr_err("Couldn't vote ICL USB_PSY_VOTER rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
+ if (rc < 0) {
+ pr_err("Couldn't remove SW_ICL_MAX vote rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ union power_supply_propval pval;
+ int rc = 0;
+
+ if (!chg->pd_active) {
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb present rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* handle the request only when USB is present */
+ if (pval.intval)
+ rc = smblib_handle_usb_current(chg, val->intval);
+ } else if (chg->system_suspend_supported) {
+ if (val->intval <= USBIN_25MA)
+ rc = vote(chg->usb_icl_votable,
+ PD_SUSPEND_SUPPORTED_VOTER, true, val->intval);
+ else
+ rc = vote(chg->usb_icl_votable,
+ PD_SUSPEND_SUPPORTED_VOTER, false, 0);
+ }
+ return rc;
+}
+
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
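+	/* pick the switcher frequency for OTG boost based on the requested boost load current */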
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+ val->intval <= chg->boost_threshold_ua ?
+ chg->chg_freq.freq_below_otg_threshold :
+ chg->chg_freq.freq_above_otg_threshold);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->boost_current_ua = val->intval;
+ return rc;
+}
+
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 power_role;
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ return 0;
+
+ switch (val->intval) {
+ case POWER_SUPPLY_TYPEC_PR_NONE:
+ power_role = TYPEC_DISABLE_CMD_BIT;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_DUAL:
+ power_role = chg->typec_try_mode;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_SINK:
+ power_role = EN_SNK_ONLY_BIT;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_SOURCE:
+ power_role = EN_SRC_ONLY_BIT;
+ break;
+ default:
+ smblib_err(chg, "power role %d not supported\n", val->intval);
+ return -EINVAL;
+ }
+
+ rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+ TYPEC_POWER_ROLE_CMD_MASK | TYPEC_TRY_MODE_MASK,
+ power_role);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ power_role, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+int smblib_set_prop_typec_select_rp(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ if (!typec_in_src_mode(chg)) {
+ smblib_err(chg, "Couldn't set curr src: not in SRC mode\n");
+ return -EINVAL;
+ }
+
+ if (val->intval < TYPEC_SRC_RP_MAX_ELEMENTS) {
+ rc = smblib_masked_write(chg, TYPE_C_CURRSRC_CFG_REG,
+ TYPEC_SRC_RP_SEL_MASK,
+ val->intval);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to TYPE_C_CURRSRC_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc, min_uv;
+
+ min_uv = min(val->intval, chg->voltage_max_uv);
+ rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv,
+ chg->voltage_max_uv);
+ if (rc < 0) {
+ smblib_err(chg, "invalid min voltage %duV rc=%d\n",
+ val->intval, rc);
+ return rc;
+ }
+
+ chg->voltage_min_uv = min_uv;
+ power_supply_changed(chg->usb_main_psy);
+
+ return rc;
+}
+
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc, max_uv;
+
+ max_uv = max(val->intval, chg->voltage_min_uv);
+
+ rc = smblib_set_usb_pd_fsw(chg, max_uv);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set FSW for voltage %duV rc=%d\n",
+ val->intval, rc);
+ return rc;
+ }
+
+ rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv,
+ max_uv);
+ if (rc < 0) {
+ smblib_err(chg, "invalid max voltage %duV rc=%d\n",
+ val->intval, rc);
+ return rc;
+ }
+
+ chg->voltage_max_uv = max_uv;
+ power_supply_changed(chg->usb_main_psy);
+
+ return rc;
+}
+
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ chg->pd_active = val->intval;
+
+ if (chg->pd_active) {
+ vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+
+ /*
+ * Enforce 500mA for PD until the real vote comes in later.
+ * It is guaranteed that pd_active is set prior to
+ * pd_current_max
+ */
+ vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
+
+ /*
+ * For PPS, Charge Pump is preferred over parallel charger if
+ * present.
+ */
+ mutex_lock(&chg->smb_lock);
+ if (chg->pd_active == POWER_SUPPLY_PD_PPS_ACTIVE
+ && chg->sec_cp_present) {
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_CP);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't enable secondary charger rc=%d\n",
+ rc);
+ else
+ chg->cp_reason = POWER_SUPPLY_CP_PPS;
+ }
+ mutex_unlock(&chg->smb_lock);
+ } else {
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+ vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+ vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+
+ mutex_lock(&chg->smb_lock);
+ chg->cp_reason = POWER_SUPPLY_CP_NONE;
+ rc = smblib_select_sec_charger(chg,
+ chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL :
+ POWER_SUPPLY_CHARGER_SEC_NONE);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't enable secondary charger rc=%d\n",
+ rc);
+ mutex_unlock(&chg->smb_lock);
+
+ /* PD hard resets failed, rerun apsd */
+ if (chg->ok_to_pd) {
+ chg->ok_to_pd = false;
+ rc = smblib_configure_hvdcp_apsd(chg, true);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't enable APSD rc=%d\n", rc);
+ return rc;
+ }
+ smblib_rerun_apsd_if_required(chg);
+ }
+ }
+
+ smblib_update_usb_type(chg);
+ power_supply_changed(chg->usb_psy);
+ return rc;
+}
+
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ smblib_dbg(chg, PR_MISC, "Set ship mode: %d!!\n", !!val->intval);
+
+ rc = smblib_masked_write(chg, SHIP_MODE_REG, SHIP_MODE_EN_BIT,
+ !!val->intval ? SHIP_MODE_EN_BIT : 0);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't %s ship mode, rc=%d\n",
+ !!val->intval ? "enable" : "disable", rc);
+
+ return rc;
+}
+
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ if (chg->pd_hard_reset == val->intval)
+ return rc;
+
+ chg->pd_hard_reset = val->intval;
+ rc = smblib_masked_write(chg, TYPE_C_EXIT_STATE_CFG_REG,
+ EXIT_SNK_BASED_ON_CC_BIT,
+ (chg->pd_hard_reset) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set EXIT_SNK_BASED_ON_CC rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+ u8 stat1, stat7;
+ int rc;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat1);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat7);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if ((chg->jeita_status && !(stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+ ((stat1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+ /*
+ * We are moving from JEITA soft -> Normal and charging
+ * is terminated
+ */
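+		/* toggle the charge-enable command to restart charging */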
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chg->jeita_status = stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+ return 0;
+}
+
+/************************
+ * USB MAIN PSY GETTERS *
+ ************************/
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc, jeita_cc_delta_ua = 0;
+
+ if (chg->sw_jeita_enabled) {
+ val->intval = 0;
+ return 0;
+ }
+
+ rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+ jeita_cc_delta_ua = 0;
+ }
+
+ val->intval = jeita_cc_delta_ua;
+ return 0;
+}
+
+/************************
+ * USB MAIN PSY SETTERS *
+ ************************/
+int smblib_get_charge_current(struct smb_charger *chg,
+ int *total_current_ua)
+{
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+ union power_supply_propval val = {0, };
+ int rc = 0, typec_source_rd, current_ua;
+ bool non_compliant;
+ u8 stat;
+
+ if (chg->pd_active) {
+ *total_current_ua =
+ get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+ return rc;
+ }
+
+ rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+ return rc;
+ }
+ non_compliant = stat & TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT;
+
+ /* get settled ICL */
+ rc = smblib_get_prop_input_current_settled(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
+ }
+
+ typec_source_rd = smblib_get_prop_ufp_mode(chg);
+
+ /* QC 2.0/3.0 adapter */
+ if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) {
+ *total_current_ua = HVDCP_CURRENT_UA;
+ return 0;
+ }
+
+ if (non_compliant) {
+ switch (apsd_result->bit) {
+ case CDP_CHARGER_BIT:
+ current_ua = CDP_CURRENT_UA;
+ break;
+ case DCP_CHARGER_BIT:
+ case OCP_CHARGER_BIT:
+ case FLOAT_CHARGER_BIT:
+ current_ua = DCP_CURRENT_UA;
+ break;
+ default:
+ current_ua = 0;
+ break;
+ }
+
+ *total_current_ua = max(current_ua, val.intval);
+ return 0;
+ }
+
+ switch (typec_source_rd) {
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ switch (apsd_result->bit) {
+ case CDP_CHARGER_BIT:
+ current_ua = CDP_CURRENT_UA;
+ break;
+ case DCP_CHARGER_BIT:
+ case OCP_CHARGER_BIT:
+ case FLOAT_CHARGER_BIT:
+ current_ua = chg->default_icl_ua;
+ break;
+ default:
+ current_ua = 0;
+ break;
+ }
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ current_ua = TYPEC_MEDIUM_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ current_ua = TYPEC_HIGH_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_NON_COMPLIANT:
+ case POWER_SUPPLY_TYPEC_NONE:
+ default:
+ current_ua = 0;
+ break;
+ }
+
+ *total_current_ua = max(current_ua, val.intval);
+ return 0;
+}
+
+/**********************
+ * INTERRUPT HANDLERS *
+ **********************/
+
+irqreturn_t default_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t chg_state_change_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ power_supply_changed(chg->batt_psy);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t batt_temp_changed_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+
+ rc = smblib_recover_from_soft_jeita(chg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rerun_election(chg->fcc_votable);
+ power_supply_changed(chg->batt_psy);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t batt_psy_changed_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ power_supply_changed(chg->batt_psy);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t usbin_uv_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ struct storm_watch *wdata;
+ const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+ int rc;
+ u8 stat = 0, max_pulses = 0;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ if (!chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data)
+ return IRQ_HANDLED;
+
+ wdata = &chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data->storm_data;
+ reset_storm_count(wdata);
+
+ /* Workaround for non-QC2.0-compliant chargers follows */
+ if (!chg->qc2_unsupported_voltage &&
+ apsd->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't read CHANGE_STATUS_REG rc=%d\n", rc);
+
+ if (stat & QC_5V_BIT)
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, HVDCP_PULSE_COUNT_MAX_REG, &max_pulses);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't read QC2 max pulses rc=%d\n", rc);
+
+ chg->qc2_max_pulses = (max_pulses &
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK);
+
+ if (stat & QC_12V_BIT) {
+ chg->qc2_unsupported_voltage = QC2_NON_COMPLIANT_12V;
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ HVDCP_PULSE_COUNT_MAX_QC2_9V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't force max pulses to 9V rc=%d\n",
+ rc);
+
+ } else if (stat & QC_9V_BIT) {
+ chg->qc2_unsupported_voltage = QC2_NON_COMPLIANT_9V;
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ HVDCP_PULSE_COUNT_MAX_QC2_5V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't force max pulses to 5V rc=%d\n",
+ rc);
+
+ }
+
+ rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+ SUSPEND_ON_COLLAPSE_USBIN_BIT,
+ 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't turn off SUSPEND_ON_COLLAPSE_USBIN_BIT rc=%d\n",
+ rc);
+
+ smblib_rerun_apsd(chg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define USB_WEAK_INPUT_UA 1400000
+#define ICL_CHANGE_DELAY_MS 1000
+irqreturn_t icl_change_irq_handler(int irq, void *data)
+{
+ u8 stat;
+ int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ if (chg->mode == PARALLEL_MASTER) {
+ rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ /* If AICL settled then schedule work now */
+ if (settled_ua == get_effective_result(chg->usb_icl_votable))
+ delay = 0;
+
+ cancel_delayed_work_sync(&chg->icl_change_work);
+ schedule_delayed_work(&chg->icl_change_work,
+ msecs_to_jiffies(delay));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
+{
+ if (!vbus_rising) {
+ smblib_update_usb_type(chg);
+ smblib_notify_device_mode(chg, false);
+ smblib_uusb_removal(chg);
+ }
+}
+
+void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+ bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
+
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return;
+ }
+
+ vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+
+ if (vbus_rising) {
+ /* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+ if (chg->fcc_stepper_enable)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+ } else {
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+
+ /* Force 1500mA FCC on USB removal if fcc stepper is enabled */
+ if (chg->fcc_stepper_enable)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+ true, 1500000);
+ }
+
+ power_supply_changed(chg->usb_psy);
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+ vbus_rising ? "attached" : "detached");
+}
+
+#define PL_DELAY_MS 30000
+void smblib_usb_plugin_locked(struct smb_charger *chg)
+{
+ int rc;
+ u8 stat;
+ bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
+
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return;
+ }
+
+ vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+ smblib_set_opt_switcher_freq(chg, vbus_rising ? chg->chg_freq.freq_5V :
+ chg->chg_freq.freq_removal);
+
+ if (vbus_rising) {
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
+
+ /* Enable SW Thermal regulation */
+ rc = smblib_set_sw_thermal_regulation(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't start SW thermal regulation WA, rc=%d\n",
+ rc);
+
+ /* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+ if (chg->fcc_stepper_enable)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+
+ /* Schedule work to enable parallel charger */
+ vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
+ schedule_delayed_work(&chg->pl_enable_work,
+ msecs_to_jiffies(PL_DELAY_MS));
+ } else {
+ /* Disable SW Thermal Regulation */
+ rc = smblib_set_sw_thermal_regulation(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't stop SW thermal regulation WA, rc=%d\n",
+ rc);
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+
+ /* Force 1500mA FCC on removal if fcc stepper is enabled */
+ if (chg->fcc_stepper_enable)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+ true, 1500000);
+
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+
+ smblib_update_usb_type(chg);
+ }
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ smblib_micro_usb_plugin(chg, vbus_rising);
+
+ power_supply_changed(chg->usb_psy);
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+ vbus_rising ? "attached" : "detached");
+}
+
+irqreturn_t usb_plugin_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ if (chg->pd_hard_reset)
+ smblib_usb_plugin_hard_reset_locked(chg);
+ else
+ smblib_usb_plugin_locked(chg);
+
+ return IRQ_HANDLED;
+}
+
+static void smblib_handle_slow_plugin_timeout(struct smb_charger *chg,
+ bool rising)
+{
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: slow-plugin-timeout %s\n",
+ rising ? "rising" : "falling");
+}
+
+static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
+ bool rising)
+{
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: sdp-enumeration-done %s\n",
+ rising ? "rising" : "falling");
+}
+
+/* triggers when HVDCP 3.0 authentication has finished */
+static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
+ bool rising)
+{
+ const struct apsd_result *apsd_result;
+ int rc;
+
+ if (!rising)
+ return;
+
+ if (chg->mode == PARALLEL_MASTER)
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
+ /* the APSD done handler will set the USB supply type */
+ apsd_result = smblib_get_apsd_result(chg);
+
+ /* for QC3, switch to CP if present */
+ if ((apsd_result->bit & QC_3P0_BIT) && chg->sec_cp_present) {
+ mutex_lock(&chg->smb_lock);
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_CP);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't enable secondary chargers rc=%d\n", rc);
+ else
+ chg->cp_reason = POWER_SUPPLY_CP_HVDCP3;
+ mutex_unlock(&chg->smb_lock);
+ }
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
+ apsd_result->name);
+}
+
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+ bool rising, bool qc_charger)
+{
+ if (rising) {
+ if (qc_charger) {
+ /* enable HDC and ICL irq for QC2/3 charger */
+ vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ HVDCP_CURRENT_UA);
+ } else {
+ /* A plain DCP, enforce DCP ICL if specified */
+ vote(chg->usb_icl_votable, DCP_VOTER,
+ chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
+ }
+ }
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n", __func__,
+ rising ? "rising" : "falling");
+}
+
+/* triggers when HVDCP is detected */
+static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
+ bool rising)
+{
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-detect-done %s\n",
+ rising ? "rising" : "falling");
+}
+
+static void update_sw_icl_max(struct smb_charger *chg, int pst)
+{
+ int typec_mode;
+ int rp_ua;
+
+ /* while PD is active it should have complete ICL control */
+ if (chg->pd_active)
+ return;
+
+ /*
+ * HVDCP 2/3 is handled separately.
+ * For UNKNOWN (input not present), return without updating the ICL.
+ */
+ if (pst == POWER_SUPPLY_TYPE_USB_HVDCP
+ || pst == POWER_SUPPLY_TYPE_USB_HVDCP_3
+ || pst == POWER_SUPPLY_TYPE_UNKNOWN)
+ return;
+
+ /* TypeC rp med or high, use rp value */
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ if (typec_rp_med_high(chg, typec_mode)) {
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, rp_ua);
+ return;
+ }
+
+ /* rp-std or legacy, USB BC 1.2 */
+ switch (pst) {
+ case POWER_SUPPLY_TYPE_USB:
+ /*
+ * USB_PSY will vote to increase the current to 500/900mA once
+ * enumeration is done.
+ */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ USB_PSY_VOTER))
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
+ SDP_100_MA);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ CDP_CURRENT_UA);
+ break;
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ DCP_CURRENT_UA);
+ break;
+ case POWER_SUPPLY_TYPE_USB_FLOAT:
+ /*
+ * Limit ICL to 100 mA; the USB driver will enumerate to check
+ * whether this is an SDP and set the current appropriately.
+ */
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ SDP_100_MA);
+ break;
+ default:
+ smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ SDP_CURRENT_UA);
+ break;
+ }
+}
+
+static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
+{
+ const struct apsd_result *apsd_result;
+
+ if (!rising)
+ return;
+
+ apsd_result = smblib_update_usb_type(chg);
+
+ update_sw_icl_max(chg, apsd_result->pst);
+
+ switch (apsd_result->bit) {
+ case SDP_CHARGER_BIT:
+ case CDP_CHARGER_BIT:
+ case FLOAT_CHARGER_BIT:
+ if (chg->use_extcon)
+ smblib_notify_device_mode(chg, true);
+ break;
+ case OCP_CHARGER_BIT:
+ case DCP_CHARGER_BIT:
+ break;
+ default:
+ break;
+ }
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: apsd-done rising; %s detected\n",
+ apsd_result->name);
+}
+
+irqreturn_t usb_source_change_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc = 0;
+ u8 stat;
+
+ /*
+ * Prepared to run PD, or PD is already active. APSD is disabled at this
+ * point, but an apsd_done IRQ from a previously unfinished APSD run can
+ * still arrive; skip it.
+ */
+ if (chg->ok_to_pd)
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+ smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+ if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ && (stat & APSD_DTC_STATUS_DONE_BIT)
+ && !chg->uusb_apsd_rerun_done) {
+ /*
+ * Force a re-run of APSD to handle charger mis-detection caused by
+ * slow insertion.
+ */
+ chg->uusb_apsd_rerun_done = true;
+ smblib_rerun_apsd_if_required(chg);
+ return IRQ_HANDLED;
+ }
+
+ smblib_handle_apsd_done(chg,
+ (bool)(stat & APSD_DTC_STATUS_DONE_BIT));
+
+ smblib_handle_hvdcp_detect_done(chg,
+ (bool)(stat & QC_CHARGER_BIT));
+
+ smblib_handle_hvdcp_check_timeout(chg,
+ (bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+ (bool)(stat & QC_CHARGER_BIT));
+
+ smblib_handle_hvdcp_3p0_auth_done(chg,
+ (bool)(stat & QC_AUTH_DONE_STATUS_BIT));
+
+ smblib_handle_sdp_enumeration_done(chg,
+ (bool)(stat & ENUMERATION_DONE_BIT));
+
+ smblib_handle_slow_plugin_timeout(chg,
+ (bool)(stat & SLOW_PLUGIN_TIMEOUT_BIT));
+
+ smblib_hvdcp_adaptive_voltage_change(chg);
+
+ power_supply_changed(chg->usb_psy);
+
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+ smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+ return IRQ_HANDLED;
+}
+
+enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm,
+ ktime_t time)
+{
+ union power_supply_propval pval;
+ struct smb_charger *chg = container_of(alarm, struct smb_charger,
+ lpd_recheck_timer);
+ int rc;
+
+ if (chg->lpd_reason == LPD_MOISTURE_DETECTED) {
+ pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ pval.intval, rc);
+ return ALARMTIMER_NORESTART;
+ }
+ } else {
+ rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+ TYPEC_WATER_DETECTION_INT_EN_BIT,
+ TYPEC_WATER_DETECTION_INT_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set TYPE_C_INTERRUPT_EN_CFG_2_REG rc=%d\n",
+ rc);
+ return ALARMTIMER_NORESTART;
+ }
+ }
+
+ chg->lpd_stage = LPD_STAGE_NONE;
+ chg->lpd_reason = LPD_NONE;
+
+ return ALARMTIMER_NORESTART;
+}
+
+#define RSBU_K_300K_UV 3000000
+static bool smblib_src_lpd(struct smb_charger *chg)
+{
+ union power_supply_propval pval;
+ bool lpd_flag = false;
+ u8 stat;
+ int rc;
+
+ rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
+ rc);
+ return false;
+ }
+
+ switch (stat & DETECTED_SNK_TYPE_MASK) {
+ case SRC_DEBUG_ACCESS_BIT:
+ if (smblib_rsbux_low(chg, RSBU_K_300K_UV))
+ lpd_flag = true;
+ break;
+ case SRC_RD_RA_VCONN_BIT:
+ case SRC_RD_OPEN_BIT:
+ case AUDIO_ACCESS_RA_RA_BIT:
+ default:
+ break;
+ }
+
+ if (lpd_flag) {
+ chg->lpd_stage = LPD_STAGE_COMMIT;
+ pval.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ pval.intval, rc);
+ chg->lpd_reason = LPD_MOISTURE_DETECTED;
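+		/* recheck the moisture condition after 60 seconds */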
+ alarm_start_relative(&chg->lpd_recheck_timer,
+ ms_to_ktime(60000));
+ } else {
+ chg->lpd_reason = LPD_NONE;
+ chg->typec_mode = smblib_get_prop_typec_mode(chg);
+ }
+
+ return lpd_flag;
+}
+
+static void typec_sink_insertion(struct smb_charger *chg)
+{
+ vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
+
+ if (chg->use_extcon) {
+ smblib_notify_usb_host(chg, true);
+ chg->otg_present = true;
+ }
+
+ if (!chg->pr_swap_in_progress)
+ chg->ok_to_pd = (!(chg->pd_disabled) || chg->early_usb_attach)
+ && !chg->pd_not_supported;
+}
+
+static void typec_src_insertion(struct smb_charger *chg)
+{
+ int rc = 0;
+ u8 stat;
+
+ if (chg->pr_swap_in_progress)
+ return;
+
+ rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATE_MACHINE_STATUS_REG rc=%d\n",
+ rc);
+ return;
+ }
+
+ chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
+ chg->ok_to_pd = (!(chg->typec_legacy || chg->pd_disabled)
+ || chg->early_usb_attach) && !chg->pd_not_supported;
+ if (!chg->ok_to_pd) {
+ rc = smblib_configure_hvdcp_apsd(chg, true);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't enable APSD rc=%d\n", rc);
+ return;
+ }
+ smblib_rerun_apsd_if_required(chg);
+ }
+}
+
+static void typec_sink_removal(struct smb_charger *chg)
+{
+ vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
+
+ if (chg->use_extcon) {
+ if (chg->otg_present)
+ smblib_notify_usb_host(chg, false);
+ chg->otg_present = false;
+ }
+}
+
+static void typec_src_removal(struct smb_charger *chg)
+{
+ int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
+
+ mutex_lock(&chg->smb_lock);
+ chg->cp_reason = POWER_SUPPLY_CP_NONE;
+ rc = smblib_select_sec_charger(chg,
+ chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL :
+ POWER_SUPPLY_CHARGER_SEC_NONE);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't disable secondary charger rc=%d\n", rc);
+ mutex_unlock(&chg->smb_lock);
+
+ /* disable apsd */
+ rc = smblib_configure_hvdcp_apsd(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc);
+
+ smblib_update_usb_type(chg);
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+
+ /* reset input current limit voters */
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+ vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+ vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
+ vote(chg->usb_icl_votable, CTM_VOTER, false, 0);
+
+ /* reset usb irq voters */
+ vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+ vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
+
+ /* reset parallel voters */
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->pl_disable_votable, PL_FCC_LOW_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
+ /* Remove SW thermal regulation WA votes */
+ vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER, false, 0);
+ if (chg->cp_disable_votable)
+ vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER,
+ false, 0);
+
+ chg->pulse_cnt = 0;
+ chg->usb_icl_delta_ua = 0;
+ chg->voltage_min_uv = MICRO_5V;
+ chg->voltage_max_uv = MICRO_5V;
+
+ /* write back the default FLOAT charger configuration */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ (u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+ rc);
+
+ /* reconfigure allowed voltage for HVDCP */
+ rc = smblib_set_adapter_allowance(chg,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+ rc);
+
+ /*
+ * if non-compliant charger caused UV, restore original max pulses
+ * and turn SUSPEND_ON_COLLAPSE_USBIN_BIT back on.
+ */
+ if (chg->qc2_unsupported_voltage) {
+ rc = smblib_masked_write(chg, HVDCP_PULSE_COUNT_MAX_REG,
+ HVDCP_PULSE_COUNT_MAX_QC2_MASK,
+ chg->qc2_max_pulses);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore max pulses rc=%d\n",
+ rc);
+
+ rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+ SUSPEND_ON_COLLAPSE_USBIN_BIT,
+ SUSPEND_ON_COLLAPSE_USBIN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't turn on SUSPEND_ON_COLLAPSE_USBIN_BIT rc=%d\n",
+ rc);
+
+ chg->qc2_unsupported_voltage = QC2_COMPLIANT;
+ }
+
+ if (chg->use_extcon)
+ smblib_notify_device_mode(chg, false);
+
+ chg->typec_legacy = false;
+}
+
+static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
+{
+ const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+
+ /*
+ * Keep the ICL vote at 100 mA for a FLOAT charger until detection by
+ * the USB stack is complete. Ignore Rp changes while the vote is still
+ * at or below 100 mA, or while FLOAT is configured for SDP current.
+ */
+ if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (get_client_vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER)
+ <= USBIN_100MA
+ || (chg->float_cfg & FLOAT_OPTIONS_MASK)
+ == FORCE_FLOAT_SDP_CFG_BIT)
+ return;
+ }
+
+ update_sw_icl_max(chg, apsd->pst);
+
+ smblib_dbg(chg, PR_MISC, "CC change old_mode=%d new_mode=%d\n",
+ chg->typec_mode, typec_mode);
+}
+
+irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+ cancel_delayed_work_sync(&chg->uusb_otg_work);
+ vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+ smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+ schedule_delayed_work(&chg->uusb_otg_work,
+ msecs_to_jiffies(chg->otg_delay_ms));
+ goto out;
+ }
+
+ if (chg->pr_swap_in_progress || chg->pd_hard_reset)
+ goto out;
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (!(stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)
+ && chg->lpd_stage == LPD_STAGE_NONE) {
+ chg->lpd_stage = LPD_STAGE_FLOAT;
+ cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+ vote(chg->awake_votable, LPD_VOTER, true, 0);
+ schedule_delayed_work(&chg->lpd_ra_open_work,
+ msecs_to_jiffies(300));
+ }
+
+ if (chg->usb_psy)
+ power_supply_changed(chg->usb_psy);
+
+out:
+ return IRQ_HANDLED;
+}
+
+irqreturn_t typec_state_change_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int typec_mode;
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+ smblib_dbg(chg, PR_INTERRUPT,
+ "Ignoring for micro USB\n");
+ return IRQ_HANDLED;
+ }
+
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ if (chg->sink_src_mode != UNATTACHED_MODE
+ && (typec_mode != chg->typec_mode))
+ smblib_handle_rp_change(chg, typec_mode);
+ chg->typec_mode = typec_mode;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+ smblib_typec_mode_name[chg->typec_mode]);
+
+ power_supply_changed(chg->usb_psy);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ rc = smblib_read(chg, TYPE_C_STATE_MACHINE_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATE_MACHINE_STATUS_REG rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) {
+ chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL;
+ cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+ vote(chg->awake_votable, LPD_VOTER, false, 0);
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ if (stat & SNK_SRC_MODE_BIT) {
+ if (smblib_src_lpd(chg))
+ return IRQ_HANDLED;
+ chg->sink_src_mode = SRC_MODE;
+ typec_sink_insertion(chg);
+ } else {
+ chg->sink_src_mode = SINK_MODE;
+ typec_src_insertion(chg);
+ }
+
+ } else {
+ switch (chg->sink_src_mode) {
+ case SRC_MODE:
+ typec_sink_removal(chg);
+ break;
+ case SINK_MODE:
+ typec_src_removal(chg);
+ break;
+ default:
+ break;
+ }
+
+ if (!chg->pr_swap_in_progress) {
+ chg->ok_to_pd = false;
+ chg->sink_src_mode = UNATTACHED_MODE;
+ chg->early_usb_attach = false;
+ }
+
+ if (chg->lpd_stage == LPD_STAGE_FLOAT_CANCEL)
+ schedule_delayed_work(&chg->lpd_detach_work,
+ msecs_to_jiffies(100));
+ }
+
+ power_supply_changed(chg->usb_psy);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t dc_plugin_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ union power_supply_propval pval;
+ int input_present;
+ bool dcin_present, vbus_present;
+ int rc, wireless_vout = 0;
+
+ rc = iio_read_channel_processed(chg->iio.vph_v_chan,
+ &wireless_vout);
+ if (rc < 0)
+ return IRQ_HANDLED;
+
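+	/*
+	 * Target 2x VPH for the wireless output, rounded down to a multiple of
+	 * 100000 (presumably uV from the VPH ADC channel, i.e. 100 mV steps).
+	 */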
+ wireless_vout *= 2;
+ wireless_vout /= 100000;
+ wireless_vout *= 100000;
+
+ rc = smblib_is_input_present(chg, &input_present);
+ if (rc < 0)
+ return IRQ_HANDLED;
+
+ dcin_present = input_present & INPUT_PRESENT_DC;
+ vbus_present = input_present & INPUT_PRESENT_USB;
+
+ if (dcin_present) {
+ if (!vbus_present && chg->sec_cp_present) {
+ pval.intval = wireless_vout;
+ rc = smblib_set_prop_voltage_wls_output(chg, &pval);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't set dc voltage to 2*vph rc=%d\n",
+ rc);
+
+ mutex_lock(&chg->smb_lock);
+ chg->cp_reason = POWER_SUPPLY_CP_WIRELESS;
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_CP);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't enable secondary chargers rc=%d\n",
+ rc);
+ mutex_unlock(&chg->smb_lock);
+ }
+ } else if (chg->cp_reason == POWER_SUPPLY_CP_WIRELESS) {
+ mutex_lock(&chg->smb_lock);
+ chg->cp_reason = POWER_SUPPLY_CP_NONE;
+ rc = smblib_select_sec_charger(chg,
+ chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL :
+ POWER_SUPPLY_CHARGER_SEC_NONE);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't disable secondary charger rc=%d\n",
+ rc);
+ mutex_unlock(&chg->smb_lock);
+ }
+
+ power_supply_changed(chg->dc_psy);
+
+ smblib_dbg(chg, PR_WLS, "dcin_present= %d, usbin_present= %d, cp_reason = %d\n",
+ dcin_present, vbus_present, chg->cp_reason);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t high_duty_cycle_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ chg->is_hdc = true;
+ /*
+	 * Disable the USB IRQ once the flag is set and re-enable it after the
+	 * flag is cleared in the delayed work, to avoid an IRQ storm during
+	 * the delay.
+ */
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
+ schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
+
+ return IRQ_HANDLED;
+}
+
+static void smblib_bb_removal_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ bb_removal_work.work);
+
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, false, 0);
+}
+
+#define BOOST_BACK_UNVOTE_DELAY_MS 750
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
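+/*
+ * A storm of switcher power-ok IRQs is first treated as a weak charger and the
+ * ICL is reduced; if the storm continues at the lower threshold it is treated
+ * as reverse boost and the input is briefly suspended.
+ */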
+irqreturn_t switcher_power_ok_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ struct storm_watch *wdata = &irq_data->storm_data;
+ int rc, usb_icl;
+ u8 stat;
+
+ if (!(chg->wa_flags & BOOST_BACK_WA))
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+	/* skip suspending input if it's already suspended by some other voter */
+ usb_icl = get_effective_result(chg->usb_icl_votable);
+ if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl <= USBIN_25MA)
+ return IRQ_HANDLED;
+
+ if (stat & USE_DCIN_BIT)
+ return IRQ_HANDLED;
+
+ if (is_storming(&irq_data->storm_data)) {
+		/* This could be a weak charger; reduce the ICL */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ WEAK_CHARGER_VOTER)) {
+ smblib_err(chg,
+ "Weak charger detected: voting %dmA ICL\n",
+ chg->weak_chg_icl_ua / 1000);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ true, chg->weak_chg_icl_ua);
+ /*
+ * reset storm data and set the storm threshold
+ * to 3 for reverse boost detection.
+ */
+ update_storm_count(wdata, BOOST_BACK_STORM_COUNT);
+ } else {
+ smblib_err(chg,
+ "Reverse boost detected: voting 0mA to suspend input\n");
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0);
+ /*
+ * Remove the boost-back vote after a delay, to avoid
+ * permanently suspending the input if the boost-back
+ * condition is unintentionally hit.
+ */
+ schedule_delayed_work(&chg->bb_removal_work,
+ msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t wdog_snarl_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (chg->wa_flags & SW_THERM_REGULATION_WA) {
+ cancel_delayed_work_sync(&chg->thermal_regulation_work);
+ vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, true, 0);
+ schedule_delayed_work(&chg->thermal_regulation_work, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t wdog_bark_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+
+ if (chg->step_chg_enabled || chg->sw_jeita_enabled)
+ power_supply_changed(chg->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**************
+ * Additional USB PSY getters/setters
+ * that call interrupt functions
+ ***************/
+
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->pr_swap_in_progress;
+ return 0;
+}
+
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+ u8 stat, orientation;
+
+ chg->pr_swap_in_progress = val->intval;
+
+ rc = smblib_masked_write(chg, TYPE_C_DEBOUNCE_OPTION_REG,
+ REDUCE_TCCDEBOUNCE_TO_2MS_BIT,
+ val->intval ? REDUCE_TCCDEBOUNCE_TO_2MS_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+
+ rc = smblib_masked_write(chg, TYPE_C_EXIT_STATE_CFG_REG,
+ BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT,
+ val->intval ? BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set exit state cfg rc=%d\n", rc);
+
+ if (chg->pr_swap_in_progress) {
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ }
+
+ orientation =
+ stat & CC_ORIENTATION_BIT ? TYPEC_CCOUT_VALUE_BIT : 0;
+ rc = smblib_masked_write(chg, TYPE_C_CCOUT_CONTROL_REG,
+ TYPEC_CCOUT_SRC_BIT | TYPEC_CCOUT_BUFFER_EN_BIT
+ | TYPEC_CCOUT_VALUE_BIT,
+ TYPEC_CCOUT_SRC_BIT | TYPEC_CCOUT_BUFFER_EN_BIT
+ | orientation);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't write TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
+ rc);
+ }
+ } else {
+ rc = smblib_masked_write(chg, TYPE_C_CCOUT_CONTROL_REG,
+ TYPEC_CCOUT_SRC_BIT, 0);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't write TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
+ rc);
+ }
+
+ /* enable DRP */
+ rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+ }
+
+ return 0;
+}
+
+/***************
+ * Work Queues *
+ ***************/
+static void smblib_uusb_otg_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ uusb_otg_work.work);
+ int rc;
+ u8 stat;
+ bool otg;
+
+ rc = smblib_read(chg, TYPEC_U_USB_STATUS_REG, &stat);
+ if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPEC_U_USB_STATUS_REG rc=%d\n", rc);
+ goto out;
+ }
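+	/* ID grounded with no VBUS indicates an OTG cable; notify USB host on change */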
+ otg = !!(stat & U_USB_GROUND_NOVBUS_BIT);
+ if (chg->otg_present != otg)
+ smblib_notify_usb_host(chg, otg);
+ else
+ goto out;
+
+ chg->otg_present = otg;
+ if (!otg)
+ chg->boost_current_ua = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+ otg ? chg->chg_freq.freq_below_otg_threshold
+ : chg->chg_freq.freq_removal);
+ if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_switcher rc=%d\n", rc);
+
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_U_USB_STATUS = 0x%02x OTG=%d\n",
+ stat, otg);
+ power_supply_changed(chg->usb_psy);
+
+out:
+ vote(chg->awake_votable, OTG_DELAY_VOTER, false, 0);
+}
+
+static void bms_update_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ bms_update_work);
+
+ smblib_suspend_on_debug_battery(chg);
+
+ if (chg->batt_psy)
+ power_supply_changed(chg->batt_psy);
+}
+
+static void pl_update_work(struct work_struct *work)
+{
+ union power_supply_propval prop_val;
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ pl_update_work);
+ int rc;
+
+ if (chg->smb_temp_max == -EINVAL) {
+ rc = smblib_get_thermal_threshold(chg,
+ SMB_REG_H_THRESHOLD_MSB_REG,
+ &chg->smb_temp_max);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n",
+ rc);
+ return;
+ }
+ }
+
+ prop_val.intval = chg->smb_temp_max;
+ rc = power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ &prop_val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_CHARGER_TEMP_MAX rc=%d\n",
+ rc);
+ return;
+ }
+
+ if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP)
+ return;
+
+ mutex_lock(&chg->smb_lock);
+ smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_PL);
+ mutex_unlock(&chg->smb_lock);
+}
+
+static void clear_hdc_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ clear_hdc_work.work);
+
+ chg->is_hdc = false;
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+}
+
+static void smblib_icl_change_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ icl_change_work.work);
+ int rc, settled_ua;
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return;
+ }
+
+ power_supply_changed(chg->usb_main_psy);
+
+ smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
+
+static void smblib_pl_enable_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ pl_enable_work.work);
+
+ smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n");
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+}
+
+static void smblib_thermal_regulation_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ thermal_regulation_work.work);
+ int rc;
+
+ rc = smblib_update_thermal_readings(chg);
+ if (rc < 0)
+		smblib_err(chg, "Couldn't read current thermal values rc=%d\n",
+ rc);
+
+ rc = smblib_process_thermal_readings(chg);
+ if (rc < 0)
+		smblib_err(chg, "Couldn't run sw thermal regulation rc=%d\n",
+ rc);
+}
+
+#define JEITA_SOFT 0
+#define JEITA_HARD 1
+static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds,
+ int type)
+{
+ int rc;
+ u16 temp, base;
+
+ base = CHGR_JEITA_THRESHOLD_BASE_REG(type);
+
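+	/*
+	 * The threshold pair at 'base' is the hot limit and the pair at
+	 * 'base + 2' the cold limit; the byte swap puts the high byte first
+	 * in each two-byte batch write.
+	 */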
+ temp = thresholds[1] & 0xFFFF;
+ temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+ rc = smblib_batch_write(chg, base, (u8 *)&temp, 2);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't configure Jeita %s hot threshold rc=%d\n",
+ (type == JEITA_SOFT) ? "Soft" : "Hard", rc);
+ return rc;
+ }
+
+ temp = thresholds[0] & 0xFFFF;
+ temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+ rc = smblib_batch_write(chg, base + 2, (u8 *)&temp, 2);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't configure Jeita %s cold threshold rc=%d\n",
+ (type == JEITA_SOFT) ? "Soft" : "Hard", rc);
+ return rc;
+ }
+
+ smblib_dbg(chg, PR_MISC, "%s Jeita threshold configured\n",
+ (type == JEITA_SOFT) ? "Soft" : "Hard");
+
+ return 0;
+}
+
+static void jeita_update_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ jeita_update_work);
+ struct device_node *node = chg->dev->of_node;
+ struct device_node *batt_node, *pnode;
+ union power_supply_propval val;
+ int rc;
+ u32 jeita_thresholds[2];
+
+ batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!batt_node) {
+ smblib_err(chg, "Batterydata not available\n");
+ goto out;
+ }
+
+ /* if BMS is not ready, defer the work */
+ if (!chg->bms_psy)
+ return;
+
+ rc = smblib_get_prop_from_bms(chg,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Failed to get batt-id rc=%d\n", rc);
+ goto out;
+ }
+
+ /* if BMS hasn't read out the batt_id yet, defer the work */
+ if (val.intval <= 0)
+ return;
+
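+	/* batt-id is reported in ohms; scale to kohms for the profile lookup */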
+ pnode = of_batterydata_get_best_profile(batt_node,
+ val.intval / 1000, NULL);
+ if (IS_ERR(pnode)) {
+ rc = PTR_ERR(pnode);
+ smblib_err(chg, "Failed to detect valid battery profile %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(pnode, "qcom,jeita-hard-thresholds",
+ jeita_thresholds, 2);
+ if (!rc) {
+ rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_HARD);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure Hard Jeita rc=%d\n",
+ rc);
+ goto out;
+ }
+ }
+
+ rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-thresholds",
+ jeita_thresholds, 2);
+ if (!rc) {
+ rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_SOFT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure Soft Jeita rc=%d\n",
+ rc);
+ goto out;
+ }
+ }
+
+out:
+ chg->jeita_configured = true;
+}
+
+static void smblib_lpd_ra_open_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ lpd_ra_open_work.work);
+ union power_supply_propval pval;
+ u8 stat;
+ int rc;
+
+ if (chg->pr_swap_in_progress || chg->pd_hard_reset) {
+ chg->lpd_stage = LPD_STAGE_NONE;
+ goto out;
+ }
+
+ if (chg->lpd_stage != LPD_STAGE_FLOAT)
+ goto out;
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* quit if moisture status is gone or in attached state */
+ if (!(stat & TYPEC_WATER_DETECTION_STATUS_BIT)
+ || (stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)) {
+ chg->lpd_stage = LPD_STAGE_NONE;
+ goto out;
+ }
+
+ chg->lpd_stage = LPD_STAGE_COMMIT;
+
+ /* Enable source only mode */
+ pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set typec source only mode rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* Wait 1.5ms to read src status */
+ usleep_range(1500, 1510);
+
+ rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
+ rc);
+ goto out;
+ }
+
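+	/*
+	 * An SBU line pulled below roughly 300 kOhm while sourcing suggests
+	 * liquid on the connector; otherwise treat it as a floating cable.
+	 */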
+ if (smblib_rsbux_low(chg, RSBU_K_300K_UV)) {
+ /* Moisture detected, enable sink only mode */
+ pval.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set typec sink only rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ chg->lpd_reason = LPD_MOISTURE_DETECTED;
+
+ } else {
+ /* Floating cable, disable water detection irq temporarily */
+ rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
+ TYPEC_WATER_DETECTION_INT_EN_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set TYPE_C_INTERRUPT_EN_CFG_2_REG rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* restore DRP mode */
+ pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't set typec power role to %d rc=%d\n",
+ pval.intval, rc);
+ goto out;
+ }
+
+ chg->lpd_reason = LPD_FLOATING_CABLE;
+ }
+
+ /* recheck in 60 seconds */
+ alarm_start_relative(&chg->lpd_recheck_timer, ms_to_ktime(60000));
+out:
+ vote(chg->awake_votable, LPD_VOTER, false, 0);
+}
+
+static void smblib_lpd_detach_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ lpd_detach_work.work);
+
+ if (chg->lpd_stage == LPD_STAGE_FLOAT_CANCEL)
+ chg->lpd_stage = LPD_STAGE_NONE;
+}
+
+static int smblib_create_votables(struct smb_charger *chg)
+{
+ int rc = 0;
+
+ chg->fcc_votable = find_votable("FCC");
+ if (chg->fcc_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find FCC votable rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->fv_votable = find_votable("FV");
+ if (chg->fv_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find FV votable rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->usb_icl_votable = find_votable("USB_ICL");
+ if (chg->usb_icl_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find USB_ICL votable rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->pl_disable_votable = find_votable("PL_DISABLE");
+ if (chg->pl_disable_votable == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
+ return rc;
+ }
+
+ chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+ if (chg->pl_enable_votable_indirect == NULL) {
+ rc = -EINVAL;
+ smblib_err(chg,
+ "Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+
+ chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
+ smblib_dc_suspend_vote_callback,
+ chg);
+ if (IS_ERR(chg->dc_suspend_votable)) {
+ rc = PTR_ERR(chg->dc_suspend_votable);
+ chg->dc_suspend_votable = NULL;
+ return rc;
+ }
+
+ chg->awake_votable = create_votable("AWAKE", VOTE_SET_ANY,
+ smblib_awake_vote_callback,
+ chg);
+ if (IS_ERR(chg->awake_votable)) {
+ rc = PTR_ERR(chg->awake_votable);
+ chg->awake_votable = NULL;
+ return rc;
+ }
+
+ chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
+ smblib_chg_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->chg_disable_votable)) {
+ rc = PTR_ERR(chg->chg_disable_votable);
+ chg->chg_disable_votable = NULL;
+ return rc;
+ }
+
+ chg->usb_irq_enable_votable = create_votable("USB_IRQ_DISABLE",
+ VOTE_SET_ANY,
+ smblib_usb_irq_enable_vote_callback,
+ chg);
+ if (IS_ERR(chg->usb_irq_enable_votable)) {
+ rc = PTR_ERR(chg->usb_irq_enable_votable);
+ chg->usb_irq_enable_votable = NULL;
+ return rc;
+ }
+
+ chg->wdog_snarl_irq_en_votable = create_votable("SNARL_WDOG_IRQ_ENABLE",
+ VOTE_SET_ANY,
+ smblib_wdog_snarl_irq_en_vote_callback,
+ chg);
+ if (IS_ERR(chg->wdog_snarl_irq_en_votable)) {
+ rc = PTR_ERR(chg->wdog_snarl_irq_en_votable);
+ chg->wdog_snarl_irq_en_votable = NULL;
+ return rc;
+ }
+
+ return rc;
+}
+
+static void smblib_destroy_votables(struct smb_charger *chg)
+{
+ if (chg->dc_suspend_votable)
+ destroy_votable(chg->dc_suspend_votable);
+ if (chg->usb_icl_votable)
+ destroy_votable(chg->usb_icl_votable);
+ if (chg->awake_votable)
+ destroy_votable(chg->awake_votable);
+ if (chg->chg_disable_votable)
+ destroy_votable(chg->chg_disable_votable);
+}
+
+static void smblib_iio_deinit(struct smb_charger *chg)
+{
+ if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
+ iio_channel_release(chg->iio.usbin_v_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan))
+ iio_channel_release(chg->iio.usbin_i_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.temp_chan))
+ iio_channel_release(chg->iio.temp_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.sbux_chan))
+ iio_channel_release(chg->iio.sbux_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.vph_v_chan))
+ iio_channel_release(chg->iio.vph_v_chan);
+}
+
+int smblib_init(struct smb_charger *chg)
+{
+ union power_supply_propval prop_val;
+ int rc = 0;
+
+ mutex_init(&chg->smb_lock);
+ INIT_WORK(&chg->bms_update_work, bms_update_work);
+ INIT_WORK(&chg->pl_update_work, pl_update_work);
+ INIT_WORK(&chg->jeita_update_work, jeita_update_work);
+ INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
+ INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+ INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
+ INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
+ INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work);
+ INIT_DELAYED_WORK(&chg->lpd_ra_open_work, smblib_lpd_ra_open_work);
+ INIT_DELAYED_WORK(&chg->lpd_detach_work, smblib_lpd_detach_work);
+ INIT_DELAYED_WORK(&chg->thermal_regulation_work,
+ smblib_thermal_regulation_work);
+ chg->fake_capacity = -EINVAL;
+ chg->fake_input_current_limited = -EINVAL;
+ chg->fake_batt_status = -EINVAL;
+ chg->sink_src_mode = UNATTACHED_MODE;
+ chg->jeita_configured = false;
+ chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE;
+ chg->cp_reason = POWER_SUPPLY_CP_NONE;
+
+ switch (chg->mode) {
+ case PARALLEL_MASTER:
+ rc = qcom_batt_init(chg->smb_version);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't initialize qcom_batt rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = qcom_step_chg_init(chg->dev, chg->step_chg_enabled,
+ chg->sw_jeita_enabled);
+ if (rc < 0) {
+			smblib_err(chg, "Couldn't initialize qcom_step_chg rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_create_votables(chg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't create votables rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg->bms_psy = power_supply_get_by_name("bms");
+
+ if (chg->sec_pl_present) {
+ chg->pl.psy = power_supply_get_by_name("parallel");
+ if (chg->pl.psy) {
+ mutex_lock(&chg->smb_lock);
+ if (chg->sec_chg_selected
+ != POWER_SUPPLY_CHARGER_SEC_CP) {
+ rc = smblib_select_sec_charger(chg,
+ POWER_SUPPLY_CHARGER_SEC_PL);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't config pl charger rc=%d\n",
+ rc);
+ }
+ mutex_unlock(&chg->smb_lock);
+
+ if (chg->smb_temp_max == -EINVAL) {
+ rc = smblib_get_thermal_threshold(chg,
+ SMB_REG_H_THRESHOLD_MSB_REG,
+ &chg->smb_temp_max);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ prop_val.intval = chg->smb_temp_max;
+ rc = power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ &prop_val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_CHARGER_TEMP_MAX rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ rc = smblib_register_notifier(chg);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't register notifier rc=%d\n", rc);
+ return rc;
+ }
+ break;
+ case PARALLEL_SLAVE:
+ break;
+ default:
+ smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int smblib_deinit(struct smb_charger *chg)
+{
+ switch (chg->mode) {
+ case PARALLEL_MASTER:
+ cancel_work_sync(&chg->bms_update_work);
+ cancel_work_sync(&chg->jeita_update_work);
+ cancel_work_sync(&chg->pl_update_work);
+ cancel_delayed_work_sync(&chg->clear_hdc_work);
+ cancel_delayed_work_sync(&chg->icl_change_work);
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+ cancel_delayed_work_sync(&chg->uusb_otg_work);
+ cancel_delayed_work_sync(&chg->bb_removal_work);
+ cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+ cancel_delayed_work_sync(&chg->lpd_detach_work);
+ cancel_delayed_work_sync(&chg->thermal_regulation_work);
+ power_supply_unreg_notifier(&chg->nb);
+ smblib_destroy_votables(chg);
+ qcom_step_chg_deinit();
+ qcom_batt_deinit();
+ break;
+ case PARALLEL_SLAVE:
+ break;
+ default:
+ smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+ return -EINVAL;
+ }
+
+ smblib_iio_deinit(chg);
+
+ return 0;
+}
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
new file mode 100644
index 0000000..59c58fd
--- /dev/null
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -0,0 +1,636 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SMB5_CHARGER_H
+#define __SMB5_CHARGER_H
+#include <linux/alarmtimer.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/extcon-provider.h>
+#include "storm-watch.h"
+
+enum print_reason {
+ PR_INTERRUPT = BIT(0),
+ PR_REGISTER = BIT(1),
+ PR_MISC = BIT(2),
+ PR_PARALLEL = BIT(3),
+ PR_OTG = BIT(4),
+ PR_WLS = BIT(5),
+};
+
+#define DEFAULT_VOTER "DEFAULT_VOTER"
+#define USER_VOTER "USER_VOTER"
+#define PD_VOTER "PD_VOTER"
+#define DCP_VOTER "DCP_VOTER"
+#define QC_VOTER "QC_VOTER"
+#define USB_PSY_VOTER "USB_PSY_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
+#define USBIN_V_VOTER "USBIN_V_VOTER"
+#define CHG_STATE_VOTER "CHG_STATE_VOTER"
+#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define THERMAL_DAEMON_VOTER "THERMAL_DAEMON_VOTER"
+#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
+#define MICRO_USB_VOTER "MICRO_USB_VOTER"
+#define DEBUG_BOARD_VOTER "DEBUG_BOARD_VOTER"
+#define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER"
+#define PL_DELAY_VOTER "PL_DELAY_VOTER"
+#define CTM_VOTER "CTM_VOTER"
+#define SW_QC3_VOTER "SW_QC3_VOTER"
+#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
+#define SW_ICL_MAX_VOTER "SW_ICL_MAX_VOTER"
+#define QNOVO_VOTER "QNOVO_VOTER"
+#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
+#define OTG_DELAY_VOTER "OTG_DELAY_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
+#define OTG_VOTER "OTG_VOTER"
+#define PL_FCC_LOW_VOTER "PL_FCC_LOW_VOTER"
+#define WBC_VOTER "WBC_VOTER"
+#define HW_LIMIT_VOTER "HW_LIMIT_VOTER"
+#define PL_SMB_EN_VOTER "PL_SMB_EN_VOTER"
+#define FORCE_RECHARGE_VOTER "FORCE_RECHARGE_VOTER"
+#define LPD_VOTER "LPD_VOTER"
+#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER"
+#define SW_THERM_REGULATION_VOTER "SW_THERM_REGULATION_VOTER"
+
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
+
+#define VBAT_TO_VRAW_ADC(v)		div_u64((u64)(v) * 1000000UL, 194637UL)
+
+#define ADC_CHG_TERM_MASK 32767
+
+enum smb_mode {
+ PARALLEL_MASTER = 0,
+ PARALLEL_SLAVE,
+ NUM_MODES,
+};
+
+enum sink_src_mode {
+ SINK_MODE,
+ SRC_MODE,
+ UNATTACHED_MODE,
+};
+
+enum qc2_non_comp_voltage {
+ QC2_COMPLIANT,
+ QC2_NON_COMPLIANT_9V,
+ QC2_NON_COMPLIANT_12V
+};
+
+enum {
+ BOOST_BACK_WA = BIT(0),
+ SW_THERM_REGULATION_WA = BIT(1),
+};
+
+enum smb_irq_index {
+ /* CHGR */
+ CHGR_ERROR_IRQ = 0,
+ CHG_STATE_CHANGE_IRQ,
+ STEP_CHG_STATE_CHANGE_IRQ,
+ STEP_CHG_SOC_UPDATE_FAIL_IRQ,
+ STEP_CHG_SOC_UPDATE_REQ_IRQ,
+ FG_FVCAL_QUALIFIED_IRQ,
+ VPH_ALARM_IRQ,
+ VPH_DROP_PRECHG_IRQ,
+ /* DCDC */
+ OTG_FAIL_IRQ,
+ OTG_OC_DISABLE_SW_IRQ,
+ OTG_OC_HICCUP_IRQ,
+ BSM_ACTIVE_IRQ,
+ HIGH_DUTY_CYCLE_IRQ,
+ INPUT_CURRENT_LIMITING_IRQ,
+ CONCURRENT_MODE_DISABLE_IRQ,
+ SWITCHER_POWER_OK_IRQ,
+ /* BATIF */
+ BAT_TEMP_IRQ,
+ ALL_CHNL_CONV_DONE_IRQ,
+ BAT_OV_IRQ,
+ BAT_LOW_IRQ,
+ BAT_THERM_OR_ID_MISSING_IRQ,
+ BAT_TERMINAL_MISSING_IRQ,
+ BUCK_OC_IRQ,
+ VPH_OV_IRQ,
+ /* USB */
+ USBIN_COLLAPSE_IRQ,
+ USBIN_VASHDN_IRQ,
+ USBIN_UV_IRQ,
+ USBIN_OV_IRQ,
+ USBIN_PLUGIN_IRQ,
+ USBIN_REVI_CHANGE_IRQ,
+ USBIN_SRC_CHANGE_IRQ,
+ USBIN_ICL_CHANGE_IRQ,
+ /* DC */
+ DCIN_VASHDN_IRQ,
+ DCIN_UV_IRQ,
+ DCIN_OV_IRQ,
+ DCIN_PLUGIN_IRQ,
+ DCIN_REVI_IRQ,
+ DCIN_PON_IRQ,
+ DCIN_EN_IRQ,
+ /* TYPEC */
+ TYPEC_OR_RID_DETECTION_CHANGE_IRQ,
+ TYPEC_VPD_DETECT_IRQ,
+ TYPEC_CC_STATE_CHANGE_IRQ,
+ TYPEC_VCONN_OC_IRQ,
+ TYPEC_VBUS_CHANGE_IRQ,
+ TYPEC_ATTACH_DETACH_IRQ,
+ TYPEC_LEGACY_CABLE_DETECT_IRQ,
+ TYPEC_TRY_SNK_SRC_DETECT_IRQ,
+ /* MISC */
+ WDOG_SNARL_IRQ,
+ WDOG_BARK_IRQ,
+ AICL_FAIL_IRQ,
+ AICL_DONE_IRQ,
+ SMB_EN_IRQ,
+ IMP_TRIGGER_IRQ,
+ TEMP_CHANGE_IRQ,
+ TEMP_CHANGE_SMB_IRQ,
+ /* FLASH */
+ VREG_OK_IRQ,
+ ILIM_S2_IRQ,
+ ILIM_S1_IRQ,
+ VOUT_DOWN_IRQ,
+ VOUT_UP_IRQ,
+ FLASH_STATE_CHANGE_IRQ,
+ TORCH_REQ_IRQ,
+ FLASH_EN_IRQ,
+ /* END */
+ SMB_IRQ_MAX,
+};
+
+enum float_options {
+ FLOAT_DCP = 1,
+ FLOAT_SDP = 2,
+ DISABLE_CHARGING = 3,
+ SUSPEND_INPUT = 4,
+};
+
+enum chg_term_config_src {
+ ITERM_SRC_UNSPECIFIED,
+ ITERM_SRC_ADC,
+ ITERM_SRC_ANALOG
+};
+
+struct smb_irq_info {
+ const char *name;
+ const irq_handler_t handler;
+ const bool wake;
+ const struct storm_watch storm_data;
+ struct smb_irq_data *irq_data;
+ int irq;
+};
+
+static const unsigned int smblib_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
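+/* LPD: Type-C connector moisture / floating-cable detection */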
+enum lpd_reason {
+ LPD_NONE,
+ LPD_MOISTURE_DETECTED,
+ LPD_FLOATING_CABLE,
+};
+
+/* Following states are applicable only for floating cable during LPD */
+enum lpd_stage {
+ /* initial stage */
+ LPD_STAGE_NONE,
+ /* started and ongoing */
+ LPD_STAGE_FLOAT,
+ /* cancel if started, or don't start */
+ LPD_STAGE_FLOAT_CANCEL,
+ /* confirmed and mitigation measures taken for 60 s */
+ LPD_STAGE_COMMIT,
+};
+
+enum thermal_status_levels {
+ TEMP_SHUT_DOWN = 0,
+ TEMP_SHUT_DOWN_SMB,
+ TEMP_ALERT_LEVEL,
+ TEMP_ABOVE_RANGE,
+ TEMP_WITHIN_RANGE,
+ TEMP_BELOW_RANGE,
+};
+
+/* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
+static const u32 smblib_extcon_exclusive[] = {0x3, 0};
+
+struct smb_regulator {
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+};
+
+struct smb_irq_data {
+ void *parent_data;
+ const char *name;
+ struct storm_watch storm_data;
+};
+
+struct smb_chg_param {
+ const char *name;
+ u16 reg;
+ int min_u;
+ int max_u;
+ int step_u;
+ int (*get_proc)(struct smb_chg_param *param,
+ u8 val_raw);
+ int (*set_proc)(struct smb_chg_param *param,
+ int val_u,
+ u8 *val_raw);
+};
+
+struct buck_boost_freq {
+ int freq_khz;
+ u8 val;
+};
+
+struct smb_chg_freq {
+ unsigned int freq_5V;
+ unsigned int freq_6V_8V;
+ unsigned int freq_9V;
+ unsigned int freq_12V;
+ unsigned int freq_removal;
+ unsigned int freq_below_otg_threshold;
+ unsigned int freq_above_otg_threshold;
+};
+
+struct smb_params {
+ struct smb_chg_param fcc;
+ struct smb_chg_param fv;
+ struct smb_chg_param usb_icl;
+ struct smb_chg_param icl_max_stat;
+ struct smb_chg_param icl_stat;
+ struct smb_chg_param otg_cl;
+ struct smb_chg_param dc_icl;
+ struct smb_chg_param jeita_cc_comp_hot;
+ struct smb_chg_param jeita_cc_comp_cold;
+ struct smb_chg_param freq_switcher;
+};
+
+struct parallel_params {
+ struct power_supply *psy;
+};
+
+struct smb_iio {
+ struct iio_channel *temp_chan;
+ struct iio_channel *usbin_i_chan;
+ struct iio_channel *usbin_v_chan;
+ struct iio_channel *mid_chan;
+ struct iio_channel *batt_i_chan;
+ struct iio_channel *connector_temp_chan;
+ struct iio_channel *sbux_chan;
+ struct iio_channel *vph_v_chan;
+ struct iio_channel *die_temp_chan;
+ struct iio_channel *skin_temp_chan;
+ struct iio_channel *smb_temp_chan;
+};
+
+struct smb_charger {
+ struct device *dev;
+ char *name;
+ struct regmap *regmap;
+ struct smb_irq_info *irq_info;
+ struct smb_params param;
+ struct smb_iio iio;
+ int *debug_mask;
+ int pd_disabled;
+ enum smb_mode mode;
+ struct smb_chg_freq chg_freq;
+ int smb_version;
+ int otg_delay_ms;
+ int weak_chg_icl_ua;
+ bool pd_not_supported;
+
+ /* locks */
+ struct mutex smb_lock;
+ struct mutex ps_change_lock;
+
+ /* power supplies */
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_main_psy;
+ struct power_supply *usb_port_psy;
+ struct power_supply *wls_psy;
+ enum power_supply_type real_charger_type;
+
+ /* notifiers */
+ struct notifier_block nb;
+
+ /* parallel charging */
+ struct parallel_params pl;
+
+ /* regulators */
+ struct smb_regulator *vbus_vreg;
+ struct smb_regulator *vconn_vreg;
+ struct regulator *dpdm_reg;
+
+ /* votables */
+ struct votable *dc_suspend_votable;
+ struct votable *fcc_votable;
+ struct votable *fv_votable;
+ struct votable *usb_icl_votable;
+ struct votable *awake_votable;
+ struct votable *pl_disable_votable;
+ struct votable *chg_disable_votable;
+ struct votable *pl_enable_votable_indirect;
+ struct votable *usb_irq_enable_votable;
+ struct votable *cp_disable_votable;
+ struct votable *wdog_snarl_irq_en_votable;
+
+ /* work */
+ struct work_struct bms_update_work;
+ struct work_struct pl_update_work;
+ struct work_struct jeita_update_work;
+ struct delayed_work ps_change_timeout_work;
+ struct delayed_work clear_hdc_work;
+ struct delayed_work icl_change_work;
+ struct delayed_work pl_enable_work;
+ struct delayed_work uusb_otg_work;
+ struct delayed_work bb_removal_work;
+ struct delayed_work lpd_ra_open_work;
+ struct delayed_work lpd_detach_work;
+ struct delayed_work thermal_regulation_work;
+
+ struct alarm lpd_recheck_timer;
+
+ /* secondary charger config */
+ bool sec_pl_present;
+ bool sec_cp_present;
+ int sec_chg_selected;
+ int cp_reason;
+
+ /* pd */
+ int voltage_min_uv;
+ int voltage_max_uv;
+ int pd_active;
+ bool pd_hard_reset;
+ bool pr_swap_in_progress;
+ bool early_usb_attach;
+ bool ok_to_pd;
+ bool typec_legacy;
+
+ /* cached status */
+ bool system_suspend_supported;
+ int boost_threshold_ua;
+ int system_temp_level;
+ int thermal_levels;
+ int *thermal_mitigation;
+ int dcp_icl_ua;
+ int fake_capacity;
+ int fake_batt_status;
+ bool step_chg_enabled;
+ bool sw_jeita_enabled;
+ bool is_hdc;
+ bool chg_done;
+ int connector_type;
+ bool otg_en;
+ bool suspend_input_on_debug_batt;
+ int default_icl_ua;
+ int otg_cl_ua;
+ bool uusb_apsd_rerun_done;
+ bool typec_present;
+ int fake_input_current_limited;
+ int typec_mode;
+ int usb_icl_change_irq_enabled;
+ u32 jeita_status;
+ u8 float_cfg;
+ bool use_extcon;
+ bool otg_present;
+ int hw_max_icl_ua;
+ int auto_recharge_soc;
+ enum sink_src_mode sink_src_mode;
+ bool jeita_configured;
+ int charger_temp_max;
+ int smb_temp_max;
+ u8 typec_try_mode;
+ enum lpd_stage lpd_stage;
+ enum lpd_reason lpd_reason;
+ bool fcc_stepper_enable;
+ int die_temp;
+ int smb_temp;
+ int skin_temp;
+ int connector_temp;
+ int thermal_status;
+ int main_fcc_max;
+
+ /* workaround flag */
+ u32 wa_flags;
+ int boost_current_ua;
+ int qc2_max_pulses;
+ enum qc2_non_comp_voltage qc2_unsupported_voltage;
+
+ /* extcon for VBUS / ID notification to USB for uUSB */
+ struct extcon_dev *extcon;
+
+ /* battery profile */
+ int batt_profile_fcc_ua;
+ int batt_profile_fv_uv;
+
+ int usb_icl_delta_ua;
+ int pulse_cnt;
+
+ int die_health;
+ int connector_health;
+
+ /* flash */
+ u32 flash_derating_soc;
+ u32 flash_disable_soc;
+ u32 headroom_mode;
+ bool flash_init_done;
+ bool flash_active;
+
+ /* wireless */
+ int wireless_vout;
+};
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val);
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val);
+int smblib_batch_write(struct smb_charger *chg, u16 addr, u8 *val, int count);
+int smblib_batch_read(struct smb_charger *chg, u16 addr, u8 *val, int count);
+
+int smblib_get_charge_param(struct smb_charger *chg,
+ struct smb_chg_param *param, int *val_u);
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
+
+int smblib_enable_charging(struct smb_charger *chg, bool enable);
+int smblib_set_charge_param(struct smb_charger *chg,
+ struct smb_chg_param *param, int val_u);
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend);
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend);
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+ u8 val_raw);
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
+int smblib_set_chg_freq(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
+
+irqreturn_t default_irq_handler(int irq, void *data);
+irqreturn_t chg_state_change_irq_handler(int irq, void *data);
+irqreturn_t batt_temp_changed_irq_handler(int irq, void *data);
+irqreturn_t batt_psy_changed_irq_handler(int irq, void *data);
+irqreturn_t usbin_uv_irq_handler(int irq, void *data);
+irqreturn_t usb_plugin_irq_handler(int irq, void *data);
+irqreturn_t usb_source_change_irq_handler(int irq, void *data);
+irqreturn_t icl_change_irq_handler(int irq, void *data);
+irqreturn_t typec_state_change_irq_handler(int irq, void *data);
+irqreturn_t typec_attach_detach_irq_handler(int irq, void *data);
+irqreturn_t dc_plugin_irq_handler(int irq, void *data);
+irqreturn_t high_duty_cycle_irq_handler(int irq, void *data);
+irqreturn_t switcher_power_ok_irq_handler(int irq, void *data);
+irqreturn_t wdog_snarl_irq_handler(int irq, void *data);
+irqreturn_t wdog_bark_irq_handler(int irq, void *data);
+irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_batt_iterm(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_batt_status(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+ const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_get_prop_dc_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_voltage_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_voltage_wls_output(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_usb_suspend(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_low_power(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_typec_select_rp(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_pe_start(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_die_health(struct smb_charger *chg);
+int smblib_get_prop_connector_health(struct smb_charger *chg);
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_sdp_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_pd_voltage_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_typec_select_rp(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_set_prop_rechg_soc_thresh(struct smb_charger *chg,
+ const union power_supply_propval *val);
+void smblib_suspend_on_debug_battery(struct smb_charger *chg);
+int smblib_rerun_apsd_if_required(struct smb_charger *chg);
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
+int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
+int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+ const union power_supply_propval *val);
+int smblib_get_prop_from_bms(struct smb_charger *chg,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+int smblib_get_iio_channel(struct smb_charger *chg, const char *propname,
+ struct iio_channel **chan);
+int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan,
+ int div, int *data);
+int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
+int smblib_icl_override(struct smb_charger *chg, bool override);
+enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm,
+ ktime_t time);
+int smblib_toggle_smb_en(struct smb_charger *chg, int toggle);
+
+int smblib_init(struct smb_charger *chg);
+int smblib_deinit(struct smb_charger *chg);
+#endif /* __SMB5_CHARGER_H */
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
new file mode 100644
index 0000000..6ffad9b
--- /dev/null
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SMB5_CHARGER_REG_H
+#define __SMB5_CHARGER_REG_H
+
+#include <linux/bitops.h>
+
+#define CHGR_BASE 0x1000
+#define DCDC_BASE 0x1100
+#define BATIF_BASE 0x1200
+#define USBIN_BASE 0x1300
+#define DCIN_BASE 0x1400
+#define TYPEC_BASE			0x1500
+#define MISC_BASE 0x1600
+
+#define PERPH_TYPE_OFFSET 0x04
+#define TYPE_MASK GENMASK(7, 0)
+#define PERPH_SUBTYPE_OFFSET 0x05
+#define SUBTYPE_MASK GENMASK(7, 0)
+#define INT_RT_STS_OFFSET 0x10
+
+/********************************
+ * CHGR Peripheral Registers *
+ ********************************/
+#define BATTERY_CHARGER_STATUS_1_REG (CHGR_BASE + 0x06)
+#define BATTERY_CHARGER_STATUS_MASK GENMASK(2, 0)
+enum {
+ INHIBIT_CHARGE = 0,
+ TRICKLE_CHARGE,
+ PRE_CHARGE,
+ FULLON_CHARGE,
+ TAPER_CHARGE,
+ TERMINATE_CHARGE,
+ PAUSE_CHARGE,
+ DISABLE_CHARGE,
+};
+
+#define BATTERY_CHARGER_STATUS_2_REG (CHGR_BASE + 0x07)
+#define CHARGER_ERROR_STATUS_BAT_OV_BIT BIT(1)
+
+#define BATTERY_CHARGER_STATUS_5_REG (CHGR_BASE + 0x0B)
+#define ENABLE_TRICKLE_BIT BIT(2)
+#define ENABLE_PRE_CHARGING_BIT BIT(1)
+#define ENABLE_FULLON_MODE_BIT BIT(0)
+
+#define BATTERY_CHARGER_STATUS_7_REG (CHGR_BASE + 0x0D)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK GENMASK(5, 4)
+#define BAT_TEMP_STATUS_HOT_SOFT_BIT BIT(5)
+#define BAT_TEMP_STATUS_COLD_SOFT_BIT BIT(4)
+#define BAT_TEMP_STATUS_TOO_HOT_BIT BIT(3)
+#define BAT_TEMP_STATUS_TOO_COLD_BIT BIT(2)
+#define BAT_TEMP_STATUS_TOO_HOT_AFP_BIT BIT(1)
+#define BAT_TEMP_STATUS_TOO_COLD_AFP_BIT BIT(0)
+
+#define CHARGING_ENABLE_CMD_REG (CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT BIT(0)
+
+#define CHGR_CFG2_REG (CHGR_BASE + 0x51)
+#define RECHG_MASK GENMASK(2, 1)
+#define VBAT_BASED_RECHG_BIT BIT(2)
+#define SOC_BASED_RECHG_BIT GENMASK(2, 1)
+#define CHARGER_INHIBIT_BIT BIT(0)
+
+#define CHGR_FAST_CHARGE_CURRENT_CFG_REG (CHGR_BASE + 0x61)
+
+#define CHGR_ADC_ITERM_UP_THD_MSB_REG (CHGR_BASE + 0x67)
+#define CHGR_ADC_ITERM_UP_THD_LSB_REG (CHGR_BASE + 0x68)
+#define CHGR_ADC_ITERM_LO_THD_MSB_REG (CHGR_BASE + 0x69)
+#define CHGR_ADC_ITERM_LO_THD_LSB_REG (CHGR_BASE + 0x6A)
+
+#define CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG (CHGR_BASE + 0x6B)
+#define NO_OF_SAMPLE_FOR_RCHG_SHIFT 2
+#define NO_OF_SAMPLE_FOR_RCHG GENMASK(3, 2)
+
+#define CHGR_FLOAT_VOLTAGE_CFG_REG (CHGR_BASE + 0x70)
+
+#define CHARGE_INHIBIT_THRESHOLD_CFG_REG (CHGR_BASE + 0x72)
+#define CHARGE_INHIBIT_THRESHOLD_MASK GENMASK(1, 0)
+#define INHIBIT_ANALOG_VFLT_MINUS_50MV 0
+#define INHIBIT_ANALOG_VFLT_MINUS_100MV 1
+#define INHIBIT_ANALOG_VFLT_MINUS_200MV 2
+#define INHIBIT_ANALOG_VFLT_MINUS_300MV 3
+
+#define CHARGE_RCHG_SOC_THRESHOLD_CFG_REG (CHGR_BASE + 0x7D)
+
+#define CHGR_ADC_RECHARGE_THRESHOLD_MSB_REG (CHGR_BASE + 0x7E)
+
+#define CHGR_ADC_RECHARGE_THRESHOLD_LSB_REG (CHGR_BASE + 0x7F)
+
+#define JEITA_EN_CFG_REG (CHGR_BASE + 0x90)
+#define JEITA_EN_HOT_SL_FCV_BIT BIT(3)
+#define JEITA_EN_COLD_SL_FCV_BIT BIT(2)
+#define JEITA_EN_HOT_SL_CCC_BIT BIT(1)
+#define JEITA_EN_COLD_SL_CCC_BIT BIT(0)
+
+#define JEITA_CCCOMP_CFG_HOT_REG (CHGR_BASE + 0x92)
+#define JEITA_CCCOMP_CFG_COLD_REG (CHGR_BASE + 0x93)
+
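+/*
+ * Each JEITA threshold set (i) spans 4 bytes: a 2-byte hot threshold followed
+ * by a 2-byte cold threshold (see smblib_update_jeita()).
+ */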
+#define CHGR_JEITA_THRESHOLD_BASE_REG(i)	(CHGR_BASE + 0x94 + ((i) * 4))
+
+#define CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG (CHGR_BASE + 0xA2)
+#define FAST_CHARGE_SAFETY_TIMER_192_MIN 0x0
+#define FAST_CHARGE_SAFETY_TIMER_384_MIN 0x1
+#define FAST_CHARGE_SAFETY_TIMER_768_MIN 0x2
+#define FAST_CHARGE_SAFETY_TIMER_1536_MIN 0x3
+
+#define CHGR_ENG_CHARGING_CFG_REG (CHGR_BASE + 0xC0)
+#define CHGR_ITERM_USE_ANALOG_BIT BIT(3)
+
+/********************************
+ * DCDC Peripheral Registers *
+ ********************************/
+#define ICL_MAX_STATUS_REG (DCDC_BASE + 0x06)
+
+#define AICL_ICL_STATUS_REG (DCDC_BASE + 0x08)
+
+#define AICL_STATUS_REG (DCDC_BASE + 0x0A)
+#define SOFT_ILIMIT_BIT BIT(6)
+#define AICL_DONE_BIT BIT(0)
+
+#define POWER_PATH_STATUS_REG (DCDC_BASE + 0x0B)
+#define USBIN_SUSPEND_STS_BIT BIT(6)
+#define USE_USBIN_BIT BIT(4)
+#define USE_DCIN_BIT BIT(3)
+#define VALID_INPUT_POWER_SOURCE_STS_BIT BIT(0)
+
+#define DCDC_CMD_OTG_REG (DCDC_BASE + 0x40)
+#define OTG_EN_BIT BIT(0)
+
+#define DCDC_FSW_SEL_REG (DCDC_BASE + 0x50)
+
+#define DCDC_OTG_CURRENT_LIMIT_CFG_REG (DCDC_BASE + 0x52)
+#define OTG_CURRENT_LIMIT_MASK GENMASK(2, 0)
+enum {
+ OTG_CURRENT_LIMIT_500_MA,
+ OTG_CURRENT_LIMIT_1000_MA,
+ OTG_CURRENT_LIMIT_1500_MA,
+ OTG_CURRENT_LIMIT_2000_MA,
+ OTG_CURRENT_LIMIT_2500_MA,
+ OTG_CURRENT_LIMIT_3000_MA
+};
+
+#define DCDC_OTG_CFG_REG (DCDC_BASE + 0x53)
+#define OTG_EN_SRC_CFG_BIT BIT(1)
+
+#define DCDC_CFG_REF_MAX_PSNS_REG (DCDC_BASE + 0x8C)
+
+#define DCDC_ENG_SDCDC_CFG5_REG (DCDC_BASE + 0xC4)
+#define ENG_SDCDC_BAT_HPWR_MASK GENMASK(7, 6)
+enum {
+ BOOST_MODE_THRESH_3P3_V,
+ BOOST_MODE_THRESH_3P4_V = 0x40,
+ BOOST_MODE_THRESH_3P5_V = 0x80,
+ BOOST_MODE_THRESH_3P6_V = 0xC0
+};
+
+/********************************
+ * BATIF Peripheral Registers *
+ ********************************/
+
+/* BATIF Interrupt Bits */
+#define VPH_OV_RT_STS_BIT BIT(7)
+#define BUCK_OC_RT_STS_BIT BIT(6)
+#define BAT_TERMINAL_MISSING_RT_STS_BIT BIT(5)
+#define BAT_THERM_OR_ID_MISSING_RT_STS_BIT BIT(4)
+#define BAT_LOW_RT_STS_BIT BIT(3)
+#define BAT_OV_RT_STS_BIT BIT(2)
+#define ALL_CHNL_CONV_DONE_RT_STS BIT(1)
+#define BAT_TEMP_RT_STS_BIT BIT(0)
+
+#define SHIP_MODE_REG (BATIF_BASE + 0x40)
+#define SHIP_MODE_EN_BIT BIT(0)
+
+/********************************
+ * USBIN Peripheral Registers *
+ ********************************/
+#define APSD_STATUS_REG (USBIN_BASE + 0x07)
+#define APSD_STATUS_7_BIT BIT(7)
+#define HVDCP_CHECK_TIMEOUT_BIT BIT(6)
+#define SLOW_PLUGIN_TIMEOUT_BIT BIT(5)
+#define ENUMERATION_DONE_BIT BIT(4)
+#define VADP_CHANGE_DONE_AFTER_AUTH_BIT BIT(3)
+#define QC_AUTH_DONE_STATUS_BIT BIT(2)
+#define QC_CHARGER_BIT BIT(1)
+#define APSD_DTC_STATUS_DONE_BIT BIT(0)
+
+#define APSD_RESULT_STATUS_REG (USBIN_BASE + 0x08)
+#define APSD_RESULT_STATUS_7_BIT BIT(7)
+#define APSD_RESULT_STATUS_MASK GENMASK(6, 0)
+#define QC_3P0_BIT BIT(6)
+#define QC_2P0_BIT BIT(5)
+#define FLOAT_CHARGER_BIT BIT(4)
+#define DCP_CHARGER_BIT BIT(3)
+#define CDP_CHARGER_BIT BIT(2)
+#define OCP_CHARGER_BIT BIT(1)
+#define SDP_CHARGER_BIT BIT(0)
+
+#define QC_CHANGE_STATUS_REG (USBIN_BASE + 0x09)
+#define QC_12V_BIT BIT(2)
+#define QC_9V_BIT BIT(1)
+#define QC_5V_BIT BIT(0)
+#define QC_2P0_STATUS_MASK GENMASK(2, 0)
+
+/* USBIN Interrupt Bits */
+#define USBIN_ICL_CHANGE_RT_STS_BIT BIT(7)
+#define USBIN_SOURCE_CHANGE_RT_STS_BIT BIT(6)
+#define USBIN_REVI_RT_STS_BIT BIT(5)
+#define USBIN_PLUGIN_RT_STS_BIT BIT(4)
+#define USBIN_OV_RT_STS_BIT BIT(3)
+#define USBIN_UV_RT_STS_BIT BIT(2)
+#define USBIN_VASHDN_RT_STS_BIT BIT(1)
+#define USBIN_COLLAPSE_RT_STS_BIT BIT(0)
+
+#define USBIN_CMD_IL_REG (USBIN_BASE + 0x40)
+#define USBIN_SUSPEND_BIT BIT(0)
+
+#define CMD_APSD_REG (USBIN_BASE + 0x41)
+#define APSD_RERUN_BIT BIT(0)
+
+#define CMD_HVDCP_2_REG (USBIN_BASE + 0x43)
+#define FORCE_12V_BIT BIT(5)
+#define FORCE_9V_BIT BIT(4)
+#define FORCE_5V_BIT BIT(3)
+#define SINGLE_DECREMENT_BIT BIT(1)
+#define SINGLE_INCREMENT_BIT BIT(0)
+
+#define USB_CMD_PULLDOWN_REG (USBIN_BASE + 0x45)
+#define EN_PULLDOWN_USB_IN_BIT BIT(0)
+
+#define HVDCP_PULSE_COUNT_MAX_REG (USBIN_BASE + 0x5B)
+#define HVDCP_PULSE_COUNT_MAX_QC2_MASK GENMASK(7, 6)
+enum {
+ HVDCP_PULSE_COUNT_MAX_QC2_5V = 0,
+ HVDCP_PULSE_COUNT_MAX_QC2_9V = 0x40,
+ HVDCP_PULSE_COUNT_MAX_QC2_12V = 0x80,
+ HVDCP_PULSE_COUNT_MAX_QC2_INVALID = 0xC0
+};
+
+#define USBIN_ADAPTER_ALLOW_CFG_REG (USBIN_BASE + 0x60)
+enum {
+ USBIN_ADAPTER_ALLOW_5V = 0,
+ USBIN_ADAPTER_ALLOW_9V = 2,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V = 3,
+ USBIN_ADAPTER_ALLOW_12V = 4,
+ USBIN_ADAPTER_ALLOW_5V_OR_12V = 5,
+ USBIN_ADAPTER_ALLOW_9V_TO_12V = 6,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V = 7,
+ USBIN_ADAPTER_ALLOW_5V_TO_9V = 8,
+ USBIN_ADAPTER_ALLOW_5V_TO_12V = 12,
+};
+
+#define USBIN_OPTIONS_1_CFG_REG (USBIN_BASE + 0x62)
+#define HVDCP_AUTH_ALG_EN_CFG_BIT BIT(6)
+#define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT BIT(5)
+#define BC1P2_SRC_DETECT_BIT BIT(3)
+#define HVDCP_EN_BIT BIT(2)
+
+#define USBIN_OPTIONS_2_CFG_REG (USBIN_BASE + 0x63)
+#define FLOAT_OPTIONS_MASK GENMASK(2, 0)
+#define FLOAT_DIS_CHGING_CFG_BIT BIT(2)
+#define SUSPEND_FLOAT_CFG_BIT BIT(1)
+#define FORCE_FLOAT_SDP_CFG_BIT BIT(0)
+
+#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
+#define ICL_OVERRIDE_AFTER_APSD_BIT BIT(4)
+#define USBIN_AICL_STEP_TIMING_SEL_MASK GENMASK(3, 2)
+#define USBIN_IN_COLLAPSE_GF_SEL_MASK GENMASK(1, 0)
+
+#define USBIN_ICL_OPTIONS_REG (USBIN_BASE + 0x66)
+#define CFG_USB3P0_SEL_BIT BIT(2)
+#define USB51_MODE_BIT BIT(1)
+#define USBIN_MODE_CHG_BIT BIT(0)
+
+#define USBIN_CURRENT_LIMIT_CFG_REG (USBIN_BASE + 0x70)
+
+#define USBIN_AICL_OPTIONS_CFG_REG (USBIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_USBIN_BIT BIT(7)
+#define USBIN_AICL_PERIODIC_RERUN_EN_BIT BIT(4)
+#define USBIN_AICL_ADC_EN_BIT BIT(3)
+#define USBIN_AICL_EN_BIT BIT(2)
+
+#define USB_ENG_SSUPPLY_USB2_REG (USBIN_BASE + 0xC0)
+#define ENG_SSUPPLY_12V_OV_OPT_BIT BIT(1)
+
+/********************************
+ * DCIN Peripheral Registers *
+ ********************************/
+
+/* DCIN Interrupt Bits */
+#define DCIN_PLUGIN_RT_STS_BIT BIT(4)
+
+#define DCIN_CMD_IL_REG (DCIN_BASE + 0x40)
+#define DCIN_SUSPEND_BIT BIT(0)
+
+/********************************
+ * TYPEC Peripheral Registers *
+ ********************************/
+#define TYPE_C_SNK_STATUS_REG (TYPEC_BASE + 0x06)
+#define DETECTED_SRC_TYPE_MASK GENMASK(3, 0)
+#define SNK_RP_STD_BIT BIT(3)
+#define SNK_RP_1P5_BIT BIT(2)
+#define SNK_RP_3P0_BIT BIT(1)
+#define SNK_RP_SHORT_BIT BIT(0)
+
+#define TYPE_C_SRC_STATUS_REG (TYPEC_BASE + 0x08)
+#define DETECTED_SNK_TYPE_MASK GENMASK(4, 0)
+#define SRC_HIGH_BATT_BIT BIT(5)
+#define SRC_DEBUG_ACCESS_BIT BIT(4)
+#define SRC_RD_OPEN_BIT BIT(3)
+#define SRC_RD_RA_VCONN_BIT BIT(2)
+#define SRC_RA_OPEN_BIT BIT(1)
+#define AUDIO_ACCESS_RA_RA_BIT BIT(0)
+
+#define TYPE_C_STATE_MACHINE_STATUS_REG (TYPEC_BASE + 0x09)
+#define TYPEC_ATTACH_DETACH_STATE_BIT BIT(5)
+
+#define TYPE_C_MISC_STATUS_REG (TYPEC_BASE + 0x0B)
+#define TYPEC_WATER_DETECTION_STATUS_BIT BIT(7)
+#define SNK_SRC_MODE_BIT BIT(6)
+#define TYPEC_VBUS_ERROR_STATUS_BIT BIT(4)
+#define TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT BIT(3)
+#define CC_ORIENTATION_BIT BIT(1)
+#define CC_ATTACHED_BIT BIT(0)
+
+#define LEGACY_CABLE_STATUS_REG (TYPEC_BASE + 0x0D)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT BIT(1)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT BIT(0)
+
+#define TYPEC_U_USB_STATUS_REG (TYPEC_BASE + 0x0F)
+#define U_USB_GROUND_NOVBUS_BIT BIT(6)
+#define U_USB_GROUND_BIT BIT(4)
+
+#define TYPE_C_MODE_CFG_REG (TYPEC_BASE + 0x44)
+#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
+#define EN_TRY_SNK_BIT BIT(4)
+#define EN_TRY_SRC_BIT BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define EN_SRC_ONLY_BIT BIT(2)
+#define EN_SNK_ONLY_BIT BIT(1)
+#define TYPEC_DISABLE_CMD_BIT BIT(0)
+
+#define TYPE_C_VCONN_CONTROL_REG (TYPEC_BASE + 0x46)
+#define VCONN_EN_ORIENTATION_BIT BIT(2)
+#define VCONN_EN_VALUE_BIT BIT(1)
+#define VCONN_EN_SRC_BIT BIT(0)
+
+#define TYPE_C_CCOUT_CONTROL_REG (TYPEC_BASE + 0x48)
+#define TYPEC_CCOUT_BUFFER_EN_BIT BIT(2)
+#define TYPEC_CCOUT_VALUE_BIT BIT(1)
+#define TYPEC_CCOUT_SRC_BIT BIT(0)
+
+#define TYPE_C_CRUDE_SENSOR_CFG_REG (TYPEC_BASE + 0x4e)
+#define EN_SRC_CRUDE_SENSOR_BIT BIT(1)
+#define EN_SNK_CRUDE_SENSOR_BIT BIT(0)
+
+#define TYPE_C_EXIT_STATE_CFG_REG (TYPEC_BASE + 0x50)
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP_BIT BIT(3)
+#define EXIT_SNK_BASED_ON_CC_BIT BIT(0)
+
+#define TYPE_C_CURRSRC_CFG_REG (TYPEC_BASE + 0x52)
+#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
+enum {
+ TYPEC_SRC_RP_STD,
+ TYPEC_SRC_RP_1P5A,
+ TYPEC_SRC_RP_3A,
+ TYPEC_SRC_RP_3A_DUPLICATE,
+ TYPEC_SRC_RP_MAX_ELEMENTS
+};
+
+#define TYPE_C_INTERRUPT_EN_CFG_1_REG (TYPEC_BASE + 0x5E)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT BIT(7)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT BIT(6)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT BIT(5)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT BIT(4)
+#define TYPEC_CCOUT_DETACH_INT_EN_BIT BIT(3)
+#define TYPEC_CCOUT_ATTACH_INT_EN_BIT BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN_BIT BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN_BIT BIT(0)
+
+#define TYPE_C_INTERRUPT_EN_CFG_2_REG (TYPEC_BASE + 0x60)
+#define TYPEC_SRC_BATT_HPWR_INT_EN_BIT BIT(6)
+#define MICRO_USB_STATE_CHANGE_INT_EN_BIT BIT(5)
+#define TYPEC_STATE_MACHINE_CHANGE_INT_EN_BIT BIT(4)
+#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN_BIT BIT(3)
+#define TYPEC_WATER_DETECTION_INT_EN_BIT BIT(2)
+#define TYPEC_VBUS_ERROR_INT_EN_BIT BIT(1)
+#define TYPEC_DEBOUNCE_DONE_INT_EN_BIT BIT(0)
+
+#define TYPE_C_DEBOUNCE_OPTION_REG (TYPEC_BASE + 0x62)
+#define REDUCE_TCCDEBOUNCE_TO_2MS_BIT BIT(2)
+
+#define TYPE_C_SBU_CFG_REG (TYPEC_BASE + 0x6A)
+#define SEL_SBU1_ISRC_VAL 0x04
+#define SEL_SBU2_ISRC_VAL 0x01
+
+#define TYPEC_U_USB_CFG_REG (TYPEC_BASE + 0x70)
+#define EN_MICRO_USB_MODE_BIT BIT(0)
+
+#define TYPEC_MICRO_USB_MODE_REG (TYPEC_BASE + 0x73)
+#define MICRO_USB_MODE_ONLY_BIT BIT(0)
+/********************************
+ * MISC Peripheral Registers *
+ ********************************/
+#define TEMP_RANGE_STATUS_REG (MISC_BASE + 0x06)
+#define THERM_REG_ACTIVE_BIT BIT(6)
+#define TLIM_BIT BIT(5)
+#define TEMP_RANGE_MASK GENMASK(4, 1)
+#define ALERT_LEVEL_BIT BIT(4)
+#define TEMP_ABOVE_RANGE_BIT BIT(3)
+#define TEMP_WITHIN_RANGE_BIT BIT(2)
+#define TEMP_BELOW_RANGE_BIT BIT(1)
+#define THERMREG_DISABLED_BIT BIT(0)
+
+#define DIE_TEMP_STATUS_REG (MISC_BASE + 0x07)
+#define DIE_TEMP_SHDN_BIT BIT(3)
+#define DIE_TEMP_RST_BIT BIT(2)
+#define DIE_TEMP_UB_BIT BIT(1)
+#define DIE_TEMP_LB_BIT BIT(0)
+
+#define CONNECTOR_TEMP_STATUS_REG (MISC_BASE + 0x09)
+#define CONNECTOR_TEMP_SHDN_BIT BIT(3)
+#define CONNECTOR_TEMP_RST_BIT BIT(2)
+#define CONNECTOR_TEMP_UB_BIT BIT(1)
+#define CONNECTOR_TEMP_LB_BIT BIT(0)
+
+#define BARK_BITE_WDOG_PET_REG (MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT BIT(0)
+
+#define AICL_CMD_REG (MISC_BASE + 0x44)
+#define RERUN_AICL_BIT BIT(0)
+
+#define MISC_SMB_EN_CMD_REG (MISC_BASE + 0x48)
+#define SMB_EN_OVERRIDE_VALUE_BIT BIT(4)
+#define SMB_EN_OVERRIDE_BIT BIT(3)
+#define EN_STAT_CMD_BIT BIT(2)
+#define EN_CP_FPF_CMD_BIT BIT(1)
+#define EN_CP_CMD_BIT BIT(0)
+
+#define WD_CFG_REG (MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT BIT(7)
+#define BARK_WDOG_INT_EN_BIT BIT(6)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT BIT(1)
+
+#define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK GENMASK(6, 4)
+#define SNARL_WDOG_TMOUT_62P5MS 0x00
+#define SNARL_WDOG_TMOUT_1S 0x40
+#define SNARL_WDOG_TMOUT_8S 0x70
+#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT 2
+#define BITE_WDOG_TIMEOUT_MASK GENMASK(1, 0)
+#define BITE_WDOG_TIMEOUT_8S 0x3
+#define MIN_WD_BARK_TIME 16
+
+#define AICL_RERUN_TIME_CFG_REG (MISC_BASE + 0x61)
+#define AICL_RERUN_TIME_12S_VAL 0x01
+
+#define MISC_THERMREG_SRC_CFG_REG (MISC_BASE + 0x70)
+#define THERMREG_SMB_ADC_SRC_EN_BIT BIT(5)
+#define THERMREG_DIE_CMP_SRC_EN_BIT BIT(0)
+
+#define MISC_SMB_CFG_REG (MISC_BASE + 0x90)
+#define SMB_EN_SEL_BIT BIT(4)
+#define CP_EN_POLARITY_CFG_BIT BIT(3)
+#define STAT_POLARITY_CFG_BIT BIT(2)
+#define STAT_FUNCTION_CFG_BIT BIT(1)
+#define STAT_IRQ_PULSING_EN_BIT BIT(0)
+
+#define DIE_REG_H_THRESHOLD_MSB_REG (MISC_BASE + 0xA0)
+
+#define SMB_REG_H_THRESHOLD_MSB_REG		(MISC_BASE + 0xBC)
+
+#endif /* __SMB5_CHARGER_REG_H */
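
The bit and mask definitions above are intended for read-modify-write accesses through the PMIC regmap. As a hedged illustration (the helper name and regmap handle are hypothetical; only the register and mask names come from this header), forcing the Type-C block into sink-only mode would look roughly like this:

#include <linux/regmap.h>

/*
 * Illustrative sketch only: clear the whole power-role command field in
 * TYPE_C_MODE_CFG_REG, then set EN_SNK_ONLY_BIT within it.
 */
static int example_force_sink_only(struct regmap *regmap)
{
	return regmap_update_bits(regmap, TYPE_C_MODE_CFG_REG,
				  TYPEC_POWER_ROLE_CMD_MASK, EN_SNK_ONLY_BIT);
}
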
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
new file mode 100644
index 0000000..d8f7e14
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_batterydata.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define STEP_CHG_VOTER "STEP_CHG_VOTER"
+#define JEITA_VOTER "JEITA_VOTER"
+
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
+struct step_chg_cfg {
+ struct step_chg_jeita_param param;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fcc_cfg {
+ struct step_chg_jeita_param param;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fv_cfg {
+ struct step_chg_jeita_param param;
+ struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+ struct device *dev;
+ ktime_t step_last_update_time;
+ ktime_t jeita_last_update_time;
+ bool step_chg_enable;
+ bool sw_jeita_enable;
+ bool config_is_read;
+ bool step_chg_cfg_valid;
+ bool sw_jeita_cfg_valid;
+ bool soc_based_step_chg;
+ bool ocv_based_step_chg;
+ bool batt_missing;
+ int jeita_fcc_index;
+ int jeita_fv_index;
+ int step_index;
+ int get_config_retry_count;
+
+ struct step_chg_cfg *step_chg_config;
+ struct jeita_fcc_cfg *jeita_fcc_config;
+ struct jeita_fv_cfg *jeita_fv_config;
+
+ struct votable *fcc_votable;
+ struct votable *fv_votable;
+ struct votable *usb_icl_votable;
+ struct wakeup_source *step_chg_ws;
+ struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *main_psy;
+ struct delayed_work status_change_work;
+ struct delayed_work get_config_work;
+ struct notifier_block nb;
+};
+
+static struct step_chg_info *the_chip;
+
+#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
+
+#define BATT_HOT_DECIDEGREE_MAX 600
+#define GET_CONFIG_DELAY_MS 2000
+#define GET_CONFIG_RETRY_COUNT 50
+#define WAIT_BATT_ID_READY_MS 200
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_bms_available(struct step_chg_info *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_usb_available(struct step_chg_info *chip)
+{
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+int read_range_data_from_node(struct device_node *node,
+ const char *prop_str, struct range_data *ranges,
+ u32 max_threshold, u32 max_value)
+{
+ int rc = 0, i, length, per_tuple_length, tuples;
+
+ if (!node || !prop_str || !ranges) {
+ pr_err("Invalid parameters passed\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_count_elems_of_size(node, prop_str, sizeof(u32));
+ if (rc < 0) {
+ pr_err("Count %s failed, rc=%d\n", prop_str, rc);
+ return rc;
+ }
+
+ length = rc;
+ per_tuple_length = sizeof(struct range_data) / sizeof(u32);
+ if (length % per_tuple_length) {
+		pr_err("%s length (%d) should be a multiple of %d\n",
+ prop_str, length, per_tuple_length);
+ return -EINVAL;
+ }
+ tuples = length / per_tuple_length;
+
+ if (tuples > MAX_STEP_CHG_ENTRIES) {
+ pr_err("too many entries(%d), only %d allowed\n",
+ tuples, MAX_STEP_CHG_ENTRIES);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(node, prop_str,
+ (u32 *)ranges, length);
+ if (rc) {
+ pr_err("Read %s failed, rc=%d\n", prop_str, rc);
+ return rc;
+ }
+
+ for (i = 0; i < tuples; i++) {
+ if (ranges[i].low_threshold >
+ ranges[i].high_threshold) {
+			pr_err("%s thresholds should be in ascending order\n",
+ prop_str);
+ rc = -EINVAL;
+ goto clean;
+ }
+
+ if (i != 0) {
+ if (ranges[i - 1].high_threshold >
+ ranges[i].low_threshold) {
+				pr_err("%s thresholds should be in ascending order\n",
+ prop_str);
+ rc = -EINVAL;
+ goto clean;
+ }
+ }
+
+ if (ranges[i].low_threshold > max_threshold)
+ ranges[i].low_threshold = max_threshold;
+ if (ranges[i].high_threshold > max_threshold)
+ ranges[i].high_threshold = max_threshold;
+ if (ranges[i].value > max_value)
+ ranges[i].value = max_value;
+ }
+
+ return rc;
+clean:
+ memset(ranges, 0, tuples * sizeof(struct range_data));
+ return rc;
+}
+EXPORT_SYMBOL(read_range_data_from_node);
+
+static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip)
+{
+ struct device_node *batt_node, *profile_node;
+ u32 max_fv_uv, max_fcc_ma;
+ const char *batt_type_str;
+ const __be32 *handle;
+ int batt_id_ohms, rc;
+ union power_supply_propval prop = {0, };
+
+ handle = of_get_property(chip->dev->of_node,
+ "qcom,battery-data", NULL);
+ if (!handle) {
+ pr_debug("ignore getting sw-jeita/step charging settings from profile\n");
+ return 0;
+ }
+
+ batt_node = of_find_node_by_phandle(be32_to_cpup(handle));
+ if (!batt_node) {
+ pr_err("Get battery data node failed\n");
+ return -EINVAL;
+ }
+
+ if (!is_bms_available(chip))
+ return -ENODEV;
+
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ batt_id_ohms = prop.intval;
+
+ /* bms_psy has not yet read the batt_id */
+ if (batt_id_ohms < 0)
+ return -EBUSY;
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ batt_id_ohms / 1000, NULL);
+ if (IS_ERR(profile_node))
+ return PTR_ERR(profile_node);
+
+ if (!profile_node) {
+ pr_err("Couldn't find profile\n");
+ return -ENODATA;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc < 0) {
+ pr_err("battery type unavailable, rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("battery: %s detected, getting sw-jeita/step charging settings\n",
+ batt_type_str);
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_fv_uv);
+ if (rc < 0) {
+ pr_err("max-voltage_uv reading failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+ &max_fcc_ma);
+ if (rc < 0) {
+ pr_err("max-fastchg-current-ma reading failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->soc_based_step_chg =
+ of_property_read_bool(profile_node, "qcom,soc-based-step-chg");
+ if (chip->soc_based_step_chg) {
+ chip->step_chg_config->param.psy_prop =
+ POWER_SUPPLY_PROP_CAPACITY;
+ chip->step_chg_config->param.prop_name = "SOC";
+ chip->step_chg_config->param.hysteresis = 0;
+ }
+
+ chip->ocv_based_step_chg =
+ of_property_read_bool(profile_node, "qcom,ocv-based-step-chg");
+ if (chip->ocv_based_step_chg) {
+ chip->step_chg_config->param.psy_prop =
+ POWER_SUPPLY_PROP_VOLTAGE_OCV;
+ chip->step_chg_config->param.prop_name = "OCV";
+ chip->step_chg_config->param.hysteresis = 10000;
+ chip->step_chg_config->param.use_bms = true;
+ }
+
+ chip->step_chg_cfg_valid = true;
+ rc = read_range_data_from_node(profile_node,
+ "qcom,step-chg-ranges",
+ chip->step_chg_config->fcc_cfg,
+ chip->soc_based_step_chg ? 100 : max_fv_uv,
+ max_fcc_ma * 1000);
+ if (rc < 0) {
+ pr_debug("Read qcom,step-chg-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->step_chg_cfg_valid = false;
+ }
+
+ chip->sw_jeita_cfg_valid = true;
+ rc = read_range_data_from_node(profile_node,
+ "qcom,jeita-fcc-ranges",
+ chip->jeita_fcc_config->fcc_cfg,
+ BATT_HOT_DECIDEGREE_MAX, max_fcc_ma * 1000);
+ if (rc < 0) {
+ pr_debug("Read qcom,jeita-fcc-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->sw_jeita_cfg_valid = false;
+ }
+
+ rc = read_range_data_from_node(profile_node,
+ "qcom,jeita-fv-ranges",
+ chip->jeita_fv_config->fv_cfg,
+ BATT_HOT_DECIDEGREE_MAX, max_fv_uv);
+ if (rc < 0) {
+ pr_debug("Read qcom,jeita-fv-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->sw_jeita_cfg_valid = false;
+ }
+
+ return rc;
+}
+
+static void get_config_work(struct work_struct *work)
+{
+ struct step_chg_info *chip = container_of(work,
+ struct step_chg_info, get_config_work.work);
+ int i, rc;
+
+ chip->config_is_read = false;
+ rc = get_step_chg_jeita_setting_from_profile(chip);
+
+ if (rc < 0) {
+ if (rc == -ENODEV || rc == -EBUSY) {
+ if (chip->get_config_retry_count++
+ < GET_CONFIG_RETRY_COUNT) {
+ pr_debug("bms_psy is not ready, retry: %d\n",
+ chip->get_config_retry_count);
+ goto reschedule;
+ }
+ }
+ }
+
+ chip->config_is_read = true;
+
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ pr_debug("step-chg-cfg: %duV(SoC) ~ %duV(SoC), %duA\n",
+ chip->step_chg_config->fcc_cfg[i].low_threshold,
+ chip->step_chg_config->fcc_cfg[i].high_threshold,
+ chip->step_chg_config->fcc_cfg[i].value);
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		pr_debug("jeita-fcc-cfg: %ddecidegree ~ %ddecidegree, %duA\n",
+ chip->jeita_fcc_config->fcc_cfg[i].low_threshold,
+ chip->jeita_fcc_config->fcc_cfg[i].high_threshold,
+ chip->jeita_fcc_config->fcc_cfg[i].value);
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		pr_debug("jeita-fv-cfg: %ddecidegree ~ %ddecidegree, %duV\n",
+ chip->jeita_fv_config->fv_cfg[i].low_threshold,
+ chip->jeita_fv_config->fv_cfg[i].high_threshold,
+ chip->jeita_fv_config->fv_cfg[i].value);
+
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(GET_CONFIG_DELAY_MS));
+
+}
+
+static int get_val(struct range_data *range, int hysteresis, int current_index,
+ int threshold,
+ int *new_index, int *val)
+{
+ int i;
+
+ *new_index = -EINVAL;
+
+ /*
+	 * If the threshold is less than the minimum allowed range,
+ * return -ENODATA.
+ */
+ if (threshold < range[0].low_threshold)
+ return -ENODATA;
+
+ /* First try to find the matching index without hysteresis */
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++) {
+ if (!range[i].high_threshold && !range[i].low_threshold) {
+ /* First invalid table entry; exit loop */
+ break;
+ }
+
+ if (is_between(range[i].low_threshold,
+ range[i].high_threshold, threshold)) {
+ *new_index = i;
+ *val = range[i].value;
+ break;
+ }
+ }
+
+ /*
+ * If nothing was found, the threshold exceeds the max range for sure
+	 * as the other case, where it is less than the min range, is handled
+ * at the very beginning of this function. Therefore, clip it to the
+ * max allowed range value, which is the one corresponding to the last
+ * valid entry in the battery profile data array.
+ */
+ if (*new_index == -EINVAL) {
+ if (i == 0) {
+ /* Battery profile data array is completely invalid */
+ return -ENODATA;
+ }
+
+ *new_index = (i - 1);
+ *val = range[*new_index].value;
+ }
+
+ /*
+	 * If we don't have a current_index, return this newfound value.
+	 * There is no hysteresis for the out-of-range to in-range
+	 * transition.
+ */
+ if (current_index == -EINVAL)
+ return 0;
+
+ /*
+	 * Check for hysteresis if the new index is in the neighbourhood
+	 * of our current index.
+ */
+ if (*new_index == current_index + 1) {
+ if (threshold < range[*new_index].low_threshold + hysteresis) {
+ /*
+ * Stay in the current index, threshold is not higher
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ } else if (*new_index == current_index - 1) {
+ if (threshold > range[*new_index].high_threshold - hysteresis) {
+ /*
+ * stay in the current index, threshold is not lower
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ }
+ return 0;
+}
+
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0;
+ u64 elapsed_us;
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+ if (rc < 0)
+ chip->step_chg_enable = false;
+ else
+ chip->step_chg_enable = pval.intval;
+
+ if (!chip->step_chg_enable || !chip->step_chg_cfg_valid) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (chip->step_chg_config->param.use_bms)
+ rc = power_supply_get_property(chip->bms_psy,
+ chip->step_chg_config->param.psy_prop, &pval);
+ else
+ rc = power_supply_get_property(chip->batt_psy,
+ chip->step_chg_config->param.psy_prop, &pval);
+
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ chip->step_chg_config->param.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(chip->step_chg_config->fcc_cfg,
+ chip->step_chg_config->param.hysteresis,
+ chip->step_index,
+ pval.intval,
+ &chip->step_index,
+ &fcc_ua);
+ if (rc < 0) {
+ /* remove the vote if no step-based fcc is found */
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ return -EINVAL;
+
+ vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+ pr_debug("%s = %d Step-FCC = %duA\n",
+ chip->step_chg_config->param.prop_name, pval.intval, fcc_ua);
+
+update_time:
+ chip->step_last_update_time = ktime_get();
+ return 0;
+
+reschedule:
+	/* reschedule 1000 us after the remaining debounce time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+#define JEITA_SUSPEND_HYST_UV 50000
+static int handle_jeita(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0, fv_uv = 0;
+ u64 elapsed_us;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED, &pval);
+ if (rc < 0)
+ chip->sw_jeita_enable = false;
+ else
+ chip->sw_jeita_enable = pval.intval;
+
+ if (!chip->sw_jeita_enable || !chip->sw_jeita_cfg_valid) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ if (chip->usb_icl_votable)
+ vote(chip->usb_icl_votable, JEITA_VOTER, false, 0);
+ return 0;
+ }
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ if (chip->jeita_fcc_config->param.use_bms)
+ rc = power_supply_get_property(chip->bms_psy,
+ chip->jeita_fcc_config->param.psy_prop, &pval);
+ else
+ rc = power_supply_get_property(chip->batt_psy,
+ chip->jeita_fcc_config->param.psy_prop, &pval);
+
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ chip->jeita_fcc_config->param.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(chip->jeita_fcc_config->fcc_cfg,
+ chip->jeita_fcc_config->param.hysteresis,
+ chip->jeita_fcc_index,
+ pval.intval,
+ &chip->jeita_fcc_index,
+ &fcc_ua);
+ if (rc < 0)
+ fcc_ua = 0;
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ /* changing FCC is a must */
+ return -EINVAL;
+
+ vote(chip->fcc_votable, JEITA_VOTER, fcc_ua ? true : false, fcc_ua);
+
+ rc = get_val(chip->jeita_fv_config->fv_cfg,
+ chip->jeita_fv_config->param.hysteresis,
+ chip->jeita_fv_index,
+ pval.intval,
+ &chip->jeita_fv_index,
+ &fv_uv);
+ if (rc < 0)
+ fv_uv = 0;
+
+ chip->fv_votable = find_votable("FV");
+ if (!chip->fv_votable)
+ goto update_time;
+
+ if (!chip->usb_icl_votable)
+ chip->usb_icl_votable = find_votable("USB_ICL");
+
+ if (!chip->usb_icl_votable)
+ goto set_jeita_fv;
+
+ /*
+ * If JEITA float voltage is same as max-vfloat of battery then
+ * skip any further VBAT specific checks.
+ */
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc || (pval.intval == fv_uv)) {
+ vote(chip->usb_icl_votable, JEITA_VOTER, false, 0);
+ goto set_jeita_fv;
+ }
+
+ /*
+ * Suspend USB input path if battery voltage is above
+ * JEITA VFLOAT threshold.
+ */
+ if (fv_uv > 0) {
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
+ if (!rc && (pval.intval > fv_uv))
+ vote(chip->usb_icl_votable, JEITA_VOTER, true, 0);
+ else if (pval.intval < (fv_uv - JEITA_SUSPEND_HYST_UV))
+ vote(chip->usb_icl_votable, JEITA_VOTER, false, 0);
+ }
+
+set_jeita_fv:
+ vote(chip->fv_votable, JEITA_VOTER, fv_uv ? true : false, fv_uv);
+
+update_time:
+ chip->jeita_last_update_time = ktime_get();
+
+ if (!chip->main_psy)
+ chip->main_psy = power_supply_get_by_name("main");
+ if (chip->main_psy)
+ power_supply_changed(chip->main_psy);
+
+ return 0;
+
+reschedule:
+	/* reschedule 1000 us after the remaining debounce time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static int handle_battery_insertion(struct step_chg_info *chip)
+{
+ int rc;
+ union power_supply_propval pval = {0, };
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0) {
+ pr_err("Get battery present status failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->batt_missing != (!pval.intval)) {
+ chip->batt_missing = !pval.intval;
+ pr_debug("battery %s detected\n",
+ chip->batt_missing ? "removal" : "insertion");
+ if (chip->batt_missing) {
+ chip->step_chg_cfg_valid = false;
+ chip->sw_jeita_cfg_valid = false;
+ chip->get_config_retry_count = 0;
+ } else {
+ /*
+ * Get config for the new inserted battery, delay
+ * to make sure BMS has read out the batt_id.
+ */
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(WAIT_BATT_ID_READY_MS));
+ }
+ }
+
+ return rc;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+ struct step_chg_info *chip = container_of(work,
+ struct step_chg_info, status_change_work.work);
+ int rc = 0;
+ int reschedule_us;
+ int reschedule_jeita_work_us = 0;
+ int reschedule_step_work_us = 0;
+ union power_supply_propval prop = {0, };
+
+ if (!is_batt_available(chip) || !is_bms_available(chip))
+ goto exit_work;
+
+ handle_battery_insertion(chip);
+
+ /* skip elapsed_us debounce for handling battery temperature */
+ rc = handle_jeita(chip);
+ if (rc > 0)
+ reschedule_jeita_work_us = rc;
+ else if (rc < 0)
+ pr_err("Couldn't handle sw jeita rc = %d\n", rc);
+
+ rc = handle_step_chg_config(chip);
+ if (rc > 0)
+ reschedule_step_work_us = rc;
+ if (rc < 0)
+ pr_err("Couldn't handle step rc = %d\n", rc);
+
+ /* Remove stale votes on USB removal */
+ if (is_usb_available(chip)) {
+ prop.intval = 0;
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ if (!prop.intval) {
+ if (chip->usb_icl_votable)
+ vote(chip->usb_icl_votable, JEITA_VOTER,
+ false, 0);
+ }
+ }
+
+ reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us);
+ if (reschedule_us == 0)
+ goto exit_work;
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ usecs_to_jiffies(reschedule_us));
+ return;
+
+exit_work:
+ __pm_relax(chip->step_chg_ws);
+}
+
+static int step_chg_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if ((strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "usb") == 0)) {
+ __pm_stay_awake(chip->step_chg_ws);
+ schedule_delayed_work(&chip->status_change_work, 0);
+ }
+
+ if ((strcmp(psy->desc->name, "bms") == 0)) {
+ if (chip->bms_psy == NULL)
+ chip->bms_psy = psy;
+ if (!chip->config_is_read)
+ schedule_delayed_work(&chip->get_config_work, 0);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+ int rc;
+
+ chip->nb.notifier_call = step_chg_notifier_call;
+ rc = power_supply_reg_notifier(&chip->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int qcom_step_chg_init(struct device *dev,
+ bool step_chg_enable, bool sw_jeita_enable)
+{
+ int rc;
+ struct step_chg_info *chip;
+
+ if (the_chip) {
+ pr_err("Already initialized\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+ if (!chip->step_chg_ws)
+ return -EINVAL;
+
+ chip->dev = dev;
+ chip->step_chg_enable = step_chg_enable;
+ chip->sw_jeita_enable = sw_jeita_enable;
+ chip->step_index = -EINVAL;
+ chip->jeita_fcc_index = -EINVAL;
+ chip->jeita_fv_index = -EINVAL;
+
+ chip->step_chg_config = devm_kzalloc(dev,
+ sizeof(struct step_chg_cfg), GFP_KERNEL);
+ if (!chip->step_chg_config)
+ return -ENOMEM;
+
+ chip->step_chg_config->param.psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ chip->step_chg_config->param.prop_name = "VBATT";
+ chip->step_chg_config->param.hysteresis = 100000;
+
+ chip->jeita_fcc_config = devm_kzalloc(dev,
+ sizeof(struct jeita_fcc_cfg), GFP_KERNEL);
+ chip->jeita_fv_config = devm_kzalloc(dev,
+ sizeof(struct jeita_fv_cfg), GFP_KERNEL);
+ if (!chip->jeita_fcc_config || !chip->jeita_fv_config)
+ return -ENOMEM;
+
+ chip->jeita_fcc_config->param.psy_prop = POWER_SUPPLY_PROP_TEMP;
+ chip->jeita_fcc_config->param.prop_name = "BATT_TEMP";
+ chip->jeita_fcc_config->param.hysteresis = 10;
+ chip->jeita_fv_config->param.psy_prop = POWER_SUPPLY_PROP_TEMP;
+ chip->jeita_fv_config->param.prop_name = "BATT_TEMP";
+ chip->jeita_fv_config->param.hysteresis = 10;
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->get_config_work, get_config_work);
+
+ rc = step_chg_register_notifier(chip);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ goto release_wakeup_source;
+ }
+
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(GET_CONFIG_DELAY_MS));
+
+ the_chip = chip;
+
+ return 0;
+
+release_wakeup_source:
+ wakeup_source_unregister(chip->step_chg_ws);
+ return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+ struct step_chg_info *chip = the_chip;
+
+ if (!chip)
+ return;
+
+ cancel_delayed_work_sync(&chip->status_change_work);
+ cancel_delayed_work_sync(&chip->get_config_work);
+ power_supply_unreg_notifier(&chip->nb);
+ wakeup_source_unregister(chip->step_chg_ws);
+ the_chip = NULL;
+}
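
The hysteresis logic in get_val() above is easiest to see with concrete numbers. The stand-alone sketch below (hypothetical table values and helper name, with the out-of-range clamping simplified away) mirrors the index selection and shows why a reading that barely crosses a boundary does not bounce the selected entry back and forth:

#include <stdio.h>

struct range { int lo, hi, val; };

/* Example JEITA FCC table: decidegrees C -> charge current in uA */
static const struct range fcc[] = {
	{   0, 149, 1000000 },
	{ 150, 449, 3000000 },
	{ 450, 600, 1500000 },
};

/* Same idea as get_val(): pick an entry, but only move to a neighbouring
 * entry once the reading clears the boundary by the hysteresis amount.
 */
static int pick(int temp, int hyst, int cur, int *val)
{
	int i, idx = -1;

	for (i = 0; i < 3; i++) {
		if (temp >= fcc[i].lo && temp <= fcc[i].hi) {
			idx = i;
			break;
		}
	}
	if (idx < 0)
		return cur;	/* out of range: keep the current pick */

	if (cur >= 0 && idx == cur + 1 && temp < fcc[idx].lo + hyst)
		idx = cur;	/* rising, still inside the hysteresis band */
	if (cur >= 0 && idx == cur - 1 && temp > fcc[idx].hi - hyst)
		idx = cur;	/* falling, still inside the hysteresis band */

	*val = fcc[idx].val;
	return idx;
}

int main(void)
{
	int val = 0, idx;

	idx = pick(200, 10, -1, &val);	/* 20.0 C -> index 1, 3 A      */
	printf("idx=%d fcc=%duA\n", idx, val);
	idx = pick(452, 10, idx, &val);	/* 45.2 C -> stays at index 1  */
	printf("idx=%d fcc=%duA\n", idx, val);
	idx = pick(465, 10, idx, &val);	/* 46.5 C -> moves to index 2  */
	printf("idx=%d fcc=%duA\n", idx, val);
	return 0;
}
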
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
new file mode 100644
index 0000000..f5431b6
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+
+#define MAX_STEP_CHG_ENTRIES 8
+
+struct step_chg_jeita_param {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ bool use_bms;
+};
+
+struct range_data {
+ u32 low_threshold;
+ u32 high_threshold;
+ u32 value;
+};
+
+int qcom_step_chg_init(struct device *dev,
+ bool step_chg_enable, bool sw_jeita_enable);
+void qcom_step_chg_deinit(void);
+int read_range_data_from_node(struct device_node *node,
+ const char *prop_str, struct range_data *ranges,
+ u32 max_threshold, u32 max_value);
+#endif /* __STEP_CHG_H__ */
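
read_range_data_from_node() in step-chg-jeita.c reads one of these tables as a flat list of u32 triplets and casts it straight onto the struct range_data array, so each tuple lays out as (low_threshold, high_threshold, value). A small stand-alone sketch of that mapping, using made-up step-charging numbers:

#include <stdio.h>
#include <string.h>

struct range_data { unsigned int low_threshold, high_threshold, value; };

int main(void)
{
	/* <low high value> triplets, in the order they would appear in a
	 * "qcom,step-chg-ranges" style property (uV, uV, uA; made-up values)
	 */
	unsigned int flat[] = {
		3600000, 4000000, 3000000,
		4000001, 4200000, 2000000,
		4200001, 4400000, 1000000,
	};
	struct range_data ranges[3];
	int i;

	/* the driver reads the property directly into (u32 *)ranges */
	memcpy(ranges, flat, sizeof(flat));

	for (i = 0; i < 3; i++)
		printf("%u..%u uV -> %u uA\n", ranges[i].low_threshold,
		       ranges[i].high_threshold, ranges[i].value);
	return 0;
}
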
diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c
new file mode 100644
index 0000000..0794705
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "storm-watch.h"
+
+/**
+ * is_storming(): Check if an event is storming
+ *
+ * @data: Data for tracking an event storm
+ *
+ * Returns true if a storm has been detected, false otherwise.
+ */
+bool is_storming(struct storm_watch *data)
+{
+ ktime_t curr_kt, delta_kt;
+ bool is_storming = false;
+
+ if (!data)
+ return false;
+
+ if (!data->enabled)
+ return false;
+
+ /* max storm count must be greater than 0 */
+ if (data->max_storm_count <= 0)
+ return false;
+
+ /* the period threshold must be greater than 0ms */
+ if (data->storm_period_ms <= 0)
+ return false;
+
+ mutex_lock(&data->storm_lock);
+ curr_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(curr_kt, data->last_kt);
+
+ if (ktime_to_ms(delta_kt) < data->storm_period_ms)
+ data->storm_count++;
+ else
+ data->storm_count = 0;
+
+ if (data->storm_count > data->max_storm_count) {
+ is_storming = true;
+ data->storm_count = 0;
+ }
+
+ data->last_kt = curr_kt;
+ mutex_unlock(&data->storm_lock);
+ return is_storming;
+}
+
+void reset_storm_count(struct storm_watch *data)
+{
+ mutex_lock(&data->storm_lock);
+ data->storm_count = 0;
+ mutex_unlock(&data->storm_lock);
+}
+
+void update_storm_count(struct storm_watch *data, int max_count)
+{
+ if (!data)
+ return;
+
+ mutex_lock(&data->storm_lock);
+ data->max_storm_count = max_count;
+ mutex_unlock(&data->storm_lock);
+}
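
A typical consumer of this helper polls is_storming() from a context that may sleep, since the bookkeeping is protected by a mutex. The sketch below (driver names and thresholds are hypothetical; only the storm_watch API comes from this file) shows the intended usage from a threaded interrupt handler:

#include <linux/interrupt.h>
#include <linux/printk.h>
#include "storm-watch.h"

static struct storm_watch example_storm;

/* called once at probe time; storm_lock must be initialized before use */
static void example_storm_setup(void)
{
	mutex_init(&example_storm.storm_lock);
	example_storm.enabled = true;
	example_storm.storm_period_ms = 100;	/* events < 100 ms apart chain up */
	example_storm.max_storm_count = 3;	/* the 4th chained event is a storm */
}

static irqreturn_t example_irq_thread(int irq, void *data)
{
	/* is_storming() also resets the count once a storm is reported */
	if (is_storming(&example_storm))
		pr_warn("irq %d is storming, applying workaround\n", irq);

	return IRQ_HANDLED;
}
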
diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h
new file mode 100644
index 0000000..9879d8d
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __STORM_WATCH_H
+#define __STORM_WATCH_H
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+
+/**
+ * Data used to track an event storm.
+ *
+ * @storm_period_ms: The maximum time interval between two events. If this limit
+ * is exceeded then the event chain will be broken and removed
+ * from consideration for a storm.
+ * @max_storm_count: The number of chained events required to trigger a storm.
+ * @storm_count: The current number of chained events.
+ * @last_kt: Kernel time of the last event seen.
+ * @storm_lock: Mutex lock to protect storm_watch data.
+ */
+struct storm_watch {
+ bool enabled;
+ int storm_period_ms;
+ int max_storm_count;
+ int storm_count;
+ ktime_t last_kt;
+ struct mutex storm_lock;
+};
+
+bool is_storming(struct storm_watch *data);
+void reset_storm_count(struct storm_watch *data);
+void update_storm_count(struct storm_watch *data, int max_count);
+#endif
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 504d252..f2361a6 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -340,6 +340,16 @@
To compile this driver as a module, choose M here: the module
will be called pwm-pxa.
+config PWM_QTI_LPG
+ tristate "Qualcomm Technologies, Inc. LPG driver"
+ depends on MFD_SPMI_PMIC && OF
+ help
+ This driver supports the LPG (Light Pulse Generator) module found in
+ Qualcomm Technologies, Inc. PMIC chips. Each LPG channel can be
+	  configured to operate in PWM mode to output a fixed-amplitude signal
+	  with a variable duty cycle, or in LUT (look-up table) mode to output
+	  a PWM signal with a modulated amplitude.
+
config PWM_RCAR
tristate "Renesas R-Car PWM support"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 9c676a0..9014b91 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -34,6 +34,7 @@
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
+obj-$(CONFIG_PWM_QTI_LPG) += pwm-qti-lpg.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
new file mode 100644
index 0000000..03100775
--- /dev/null
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -0,0 +1,1308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define REG_SIZE_PER_LPG 0x100
+#define LPG_BASE "lpg-base"
+#define LUT_BASE "lut-base"
+
+/* LPG module registers */
+#define REG_LPG_PERPH_SUBTYPE 0x05
+#define REG_LPG_PATTERN_CONFIG 0x40
+#define REG_LPG_PWM_SIZE_CLK 0x41
+#define REG_LPG_PWM_FREQ_PREDIV_CLK 0x42
+#define REG_LPG_PWM_TYPE_CONFIG 0x43
+#define REG_LPG_PWM_VALUE_LSB 0x44
+#define REG_LPG_PWM_VALUE_MSB 0x45
+#define REG_LPG_ENABLE_CONTROL 0x46
+#define REG_LPG_PWM_SYNC 0x47
+#define REG_LPG_RAMP_STEP_DURATION_LSB 0x50
+#define REG_LPG_RAMP_STEP_DURATION_MSB 0x51
+#define REG_LPG_PAUSE_HI_MULTIPLIER 0x52
+#define REG_LPG_PAUSE_LO_MULTIPLIER 0x54
+#define REG_LPG_HI_INDEX 0x56
+#define REG_LPG_LO_INDEX 0x57
+
+/* REG_LPG_PATTERN_CONFIG */
+#define LPG_PATTERN_EN_PAUSE_LO BIT(0)
+#define LPG_PATTERN_EN_PAUSE_HI BIT(1)
+#define LPG_PATTERN_RAMP_TOGGLE BIT(2)
+#define LPG_PATTERN_REPEAT BIT(3)
+#define LPG_PATTERN_RAMP_LO_TO_HI BIT(4)
+
+/* REG_LPG_PERPH_SUBTYPE */
+#define SUBTYPE_PWM 0x0b
+#define SUBTYPE_LPG_LITE 0x11
+
+/* REG_LPG_PWM_SIZE_CLK */
+#define LPG_PWM_SIZE_LPG_MASK BIT(4)
+#define LPG_PWM_SIZE_PWM_MASK BIT(2)
+#define LPG_PWM_SIZE_LPG_SHIFT 4
+#define LPG_PWM_SIZE_PWM_SHIFT 2
+#define LPG_PWM_CLK_FREQ_SEL_MASK GENMASK(1, 0)
+
+/* REG_LPG_PWM_FREQ_PREDIV_CLK */
+#define LPG_PWM_FREQ_PREDIV_MASK GENMASK(6, 5)
+#define LPG_PWM_FREQ_PREDIV_SHIFT 5
+#define LPG_PWM_FREQ_EXPONENT_MASK GENMASK(2, 0)
+
+/* REG_LPG_PWM_TYPE_CONFIG */
+#define LPG_PWM_EN_GLITCH_REMOVAL_MASK BIT(5)
+
+/* REG_LPG_PWM_VALUE_LSB */
+#define LPG_PWM_VALUE_LSB_MASK GENMASK(7, 0)
+
+/* REG_LPG_PWM_VALUE_MSB */
+#define LPG_PWM_VALUE_MSB_MASK BIT(0)
+
+/* REG_LPG_ENABLE_CONTROL */
+#define LPG_EN_LPG_OUT_BIT BIT(7)
+#define LPG_EN_LPG_OUT_SHIFT 7
+#define LPG_PWM_SRC_SELECT_MASK BIT(2)
+#define LPG_PWM_SRC_SELECT_SHIFT 2
+#define LPG_EN_RAMP_GEN_MASK BIT(1)
+#define LPG_EN_RAMP_GEN_SHIFT 1
+
+/* REG_LPG_PWM_SYNC */
+#define LPG_PWM_VALUE_SYNC BIT(0)
+
+#define NUM_PWM_SIZE 2
+#define NUM_PWM_CLK 3
+#define NUM_CLK_PREDIV 4
+#define NUM_PWM_EXP 8
+
+#define LPG_HI_LO_IDX_MASK GENMASK(5, 0)
+
+/* LUT module registers */
+#define REG_LPG_LUT_1_LSB 0x42
+#define REG_LPG_LUT_RAMP_CONTROL 0xc8
+
+#define LPG_LUT_VALUE_MSB_MASK BIT(0)
+#define LPG_LUT_COUNT_MAX 47
+
+enum lpg_src {
+ LUT_PATTERN = 0,
+ PWM_VALUE,
+};
+
+static const int pwm_size[NUM_PWM_SIZE] = {6, 9};
+static const int clk_freq_hz[NUM_PWM_CLK] = {1024, 32768, 19200000};
+static const int clk_prediv[NUM_CLK_PREDIV] = {1, 3, 5, 6};
+static const int pwm_exponent[NUM_PWM_EXP] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+struct lpg_ramp_config {
+ u16 step_ms;
+ u8 pause_hi_count;
+ u8 pause_lo_count;
+ u8 hi_idx;
+ u8 lo_idx;
+ bool ramp_dir_low_to_hi;
+ bool pattern_repeat;
+ bool toggle;
+ u32 *pattern;
+ u32 pattern_length;
+};
+
+struct lpg_pwm_config {
+ u32 pwm_size;
+ u32 pwm_clk;
+ u32 prediv;
+ u32 clk_exp;
+ u16 pwm_value;
+ u64 best_period_ns;
+};
+
+struct qpnp_lpg_lut {
+ struct qpnp_lpg_chip *chip;
+ struct mutex lock;
+ u32 reg_base;
+ u32 *pattern; /* patterns in percentage */
+};
+
+struct qpnp_lpg_channel {
+ struct qpnp_lpg_chip *chip;
+ struct lpg_pwm_config pwm_config;
+ struct lpg_ramp_config ramp_config;
+ u32 lpg_idx;
+ u32 reg_base;
+ u32 max_pattern_length;
+ u8 src_sel;
+ u8 subtype;
+ bool lut_written;
+ u64 current_period_ns;
+ u64 current_duty_ns;
+};
+
+struct qpnp_lpg_chip {
+ struct pwm_chip pwm_chip;
+ struct regmap *regmap;
+ struct device *dev;
+ struct qpnp_lpg_channel *lpgs;
+ struct qpnp_lpg_lut *lut;
+ struct mutex bus_lock;
+ u32 *lpg_group;
+ u32 num_lpgs;
+};
+
+static int qpnp_lpg_read(struct qpnp_lpg_channel *lpg, u16 addr, u8 *val)
+{
+ int rc;
+ unsigned int tmp;
+
+ mutex_lock(&lpg->chip->bus_lock);
+ rc = regmap_read(lpg->chip->regmap, lpg->reg_base + addr, &tmp);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Read addr 0x%x failed, rc=%d\n",
+ lpg->reg_base + addr, rc);
+ else
+ *val = (u8)tmp;
+ mutex_unlock(&lpg->chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_lpg_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lpg->chip->bus_lock);
+ rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+ lpg->reg_base + addr, val, rc);
+ mutex_unlock(&lpg->chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_lpg_masked_write(struct qpnp_lpg_channel *lpg,
+ u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lpg->chip->bus_lock);
+ rc = regmap_update_bits(lpg->chip->regmap, lpg->reg_base + addr,
+ mask, val);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ lpg->reg_base + addr, val, mask, rc);
+ mutex_unlock(&lpg->chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_lut_write(struct qpnp_lpg_lut *lut, u16 addr, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lut->chip->bus_lock);
+ rc = regmap_write(lut->chip->regmap, lut->reg_base + addr, val);
+ if (rc < 0)
+ dev_err(lut->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+ lut->reg_base + addr, val, rc);
+ mutex_unlock(&lut->chip->bus_lock);
+
+ return rc;
+}
+
+static int qpnp_lut_masked_write(struct qpnp_lpg_lut *lut,
+ u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ mutex_lock(&lut->chip->bus_lock);
+ rc = regmap_update_bits(lut->chip->regmap, lut->reg_base + addr,
+ mask, val);
+ if (rc < 0)
+ dev_err(lut->chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+ lut->reg_base + addr, val, mask, rc);
+ mutex_unlock(&lut->chip->bus_lock);
+
+ return rc;
+}
+
+static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_lpg_chip *chip = container_of(pwm_chip,
+ struct qpnp_lpg_chip, pwm_chip);
+ u32 hw_idx = pwm->hwpwm;
+
+ if (hw_idx >= chip->num_lpgs) {
+ dev_err(chip->dev, "hw index %d out of range [0-%d]\n",
+ hw_idx, chip->num_lpgs - 1);
+ return NULL;
+ }
+
+ return &chip->lpgs[hw_idx];
+}
+
+static int __find_index_in_array(int member, const int array[], int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++) {
+ if (member == array[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int qpnp_lpg_set_glitch_removal(struct qpnp_lpg_channel *lpg, bool en)
+{
+ int rc;
+ u8 mask, val;
+
+ val = en ? LPG_PWM_EN_GLITCH_REMOVAL_MASK : 0;
+ mask = LPG_PWM_EN_GLITCH_REMOVAL_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_TYPE_CONFIG, mask, val);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Write LPG_PWM_TYPE_CONFIG failed, rc=%d\n",
+ rc);
+ return rc;
+}
+
+static int qpnp_lpg_set_pwm_config(struct qpnp_lpg_channel *lpg)
+{
+ int rc;
+ u8 val, mask, shift;
+ int pwm_size_idx, pwm_clk_idx, prediv_idx, clk_exp_idx;
+
+ pwm_size_idx = __find_index_in_array(lpg->pwm_config.pwm_size,
+ pwm_size, ARRAY_SIZE(pwm_size));
+ pwm_clk_idx = __find_index_in_array(lpg->pwm_config.pwm_clk,
+ clk_freq_hz, ARRAY_SIZE(clk_freq_hz));
+ prediv_idx = __find_index_in_array(lpg->pwm_config.prediv,
+ clk_prediv, ARRAY_SIZE(clk_prediv));
+ clk_exp_idx = __find_index_in_array(lpg->pwm_config.clk_exp,
+ pwm_exponent, ARRAY_SIZE(pwm_exponent));
+
+ if (pwm_size_idx < 0 || pwm_clk_idx < 0
+ || prediv_idx < 0 || clk_exp_idx < 0)
+ return -EINVAL;
+
+	/* the clock register field value is the clock array index + 1 */
+ pwm_clk_idx += 1;
+ if (lpg->subtype == SUBTYPE_PWM) {
+ shift = LPG_PWM_SIZE_PWM_SHIFT;
+ mask = LPG_PWM_SIZE_PWM_MASK;
+ } else {
+ shift = LPG_PWM_SIZE_LPG_SHIFT;
+ mask = LPG_PWM_SIZE_LPG_MASK;
+ }
+
+ val = pwm_size_idx << shift | pwm_clk_idx;
+ mask |= LPG_PWM_CLK_FREQ_SEL_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_SIZE_CLK, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_SIZE_CLK failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = prediv_idx << LPG_PWM_FREQ_PREDIV_SHIFT | clk_exp_idx;
+ mask = LPG_PWM_FREQ_PREDIV_MASK | LPG_PWM_FREQ_EXPONENT_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_FREQ_PREDIV_CLK, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_FREQ_PREDIV_CLK failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (lpg->src_sel == LUT_PATTERN)
+ return 0;
+
+ val = lpg->pwm_config.pwm_value & LPG_PWM_VALUE_LSB_MASK;
+ rc = qpnp_lpg_write(lpg, REG_LPG_PWM_VALUE_LSB, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_LSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = lpg->pwm_config.pwm_value >> 8;
+ mask = LPG_PWM_VALUE_MSB_MASK;
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_VALUE_MSB, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_MSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val = LPG_PWM_VALUE_SYNC;
+ rc = qpnp_lpg_write(lpg, REG_LPG_PWM_SYNC, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PWM_SYNC failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_set_lut_pattern(struct qpnp_lpg_channel *lpg,
+ unsigned int *pattern, unsigned int length)
+{
+ struct qpnp_lpg_lut *lut = lpg->chip->lut;
+ int i, rc = 0;
+ u16 full_duty_value, pwm_values[LPG_LUT_COUNT_MAX + 1] = {0};
+ u8 lsb, msb, addr;
+
+ if (length > lpg->max_pattern_length) {
+ dev_err(lpg->chip->dev, "new pattern length (%d) larger than predefined (%d)\n",
+ length, lpg->max_pattern_length);
+ return -EINVAL;
+ }
+
+ /* Program LUT pattern */
+ mutex_lock(&lut->lock);
+ addr = REG_LPG_LUT_1_LSB + lpg->ramp_config.lo_idx * 2;
+ for (i = 0; i < length; i++) {
+ full_duty_value = 1 << lpg->pwm_config.pwm_size;
+ pwm_values[i] = pattern[i] * full_duty_value / 100;
+
+ if (unlikely(pwm_values[i] > full_duty_value)) {
+			dev_err(lpg->chip->dev, "PWM value %d exceeds the max %d\n",
+ pwm_values[i], full_duty_value);
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ if (pwm_values[i] == full_duty_value)
+ pwm_values[i] = full_duty_value - 1;
+
+ lsb = pwm_values[i] & 0xff;
+ msb = pwm_values[i] >> 8;
+ rc = qpnp_lut_write(lut, addr++, lsb);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write NO.%d LUT pattern LSB (%d) failed, rc=%d\n",
+ i, lsb, rc);
+ goto unlock;
+ }
+
+ rc = qpnp_lut_masked_write(lut, addr++,
+ LPG_LUT_VALUE_MSB_MASK, msb);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write NO.%d LUT pattern MSB (%d) failed, rc=%d\n",
+ i, msb, rc);
+ goto unlock;
+ }
+ }
+ lpg->ramp_config.pattern_length = length;
+unlock:
+ mutex_unlock(&lut->lock);
+
+ return rc;
+}
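+
+/*
+ * Worked example for the scaling above (illustrative numbers only): with a
+ * 9-bit PWM, a LUT entry of 25(%) becomes 25 * 512 / 100 = 128, written as
+ * LSB 0x80 / MSB 0x0, while an entry of 100(%) computes to 512 and is
+ * clamped to the maximum code 511.
+ */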
+
+static int qpnp_lpg_set_ramp_config(struct qpnp_lpg_channel *lpg)
+{
+ struct lpg_ramp_config *ramp = &lpg->ramp_config;
+ u8 lsb, msb, addr, mask, val;
+ int rc = 0;
+
+ /* Set ramp step duration */
+ lsb = ramp->step_ms & 0xff;
+ msb = ramp->step_ms >> 8;
+ addr = REG_LPG_RAMP_STEP_DURATION_LSB;
+ rc = qpnp_lpg_write(lpg, addr, lsb);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write RAMP_STEP_DURATION_LSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = qpnp_lpg_write(lpg, addr + 1, msb);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write RAMP_STEP_DURATION_MSB failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Set hi_idx and lo_idx */
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_HI_INDEX,
+ LPG_HI_LO_IDX_MASK, ramp->hi_idx);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_HI_IDX failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_LO_INDEX,
+ LPG_HI_LO_IDX_MASK, ramp->lo_idx);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_LO_IDX failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Set pause_hi/lo_count */
+ rc = qpnp_lpg_write(lpg, REG_LPG_PAUSE_HI_MULTIPLIER,
+ ramp->pause_hi_count);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PAUSE_HI_MULTIPLIER failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = qpnp_lpg_write(lpg, REG_LPG_PAUSE_LO_MULTIPLIER,
+ ramp->pause_lo_count);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PAUSE_LO_MULTIPLIER failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Set LPG_PATTERN_CONFIG */
+ addr = REG_LPG_PATTERN_CONFIG;
+ mask = LPG_PATTERN_EN_PAUSE_LO | LPG_PATTERN_EN_PAUSE_HI
+ | LPG_PATTERN_RAMP_TOGGLE | LPG_PATTERN_REPEAT
+ | LPG_PATTERN_RAMP_LO_TO_HI;
+ val = 0;
+ if (ramp->pause_lo_count != 0)
+ val |= LPG_PATTERN_EN_PAUSE_LO;
+ if (ramp->pause_hi_count != 0)
+ val |= LPG_PATTERN_EN_PAUSE_HI;
+ if (ramp->ramp_dir_low_to_hi)
+ val |= LPG_PATTERN_RAMP_LO_TO_HI;
+ if (ramp->pattern_repeat)
+ val |= LPG_PATTERN_REPEAT;
+ if (ramp->toggle)
+ val |= LPG_PATTERN_RAMP_TOGGLE;
+
+ rc = qpnp_lpg_masked_write(lpg, addr, mask, val);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Write LPG_PATTERN_CONFIG failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void __qpnp_lpg_calc_pwm_period(u64 period_ns,
+ struct lpg_pwm_config *pwm_config)
+{
+ struct lpg_pwm_config configs[NUM_PWM_SIZE];
+ int i, j, m, n;
+ u64 tmp1, tmp2;
+ u64 clk_period_ns = 0, pwm_clk_period_ns;
+ u64 clk_delta_ns = U64_MAX, min_clk_delta_ns = U64_MAX;
+ u64 pwm_period_delta = U64_MAX, min_pwm_period_delta = U64_MAX;
+ int pwm_size_step;
+
+ /*
+ * (2^pwm_size) * (2^pwm_exp) * prediv * NSEC_PER_SEC
+ * pwm_period = ---------------------------------------------------
+ * clk_freq_hz
+ *
+ * Searching the closest settings for the requested PWM period.
+ */
+ for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+ pwm_clk_period_ns = period_ns >> pwm_size[n];
+ for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
+ for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
+ for (m = 0; m < ARRAY_SIZE(pwm_exponent); m++) {
+ tmp1 = 1 << pwm_exponent[m];
+ tmp1 *= clk_prediv[j];
+ tmp2 = NSEC_PER_SEC;
+ do_div(tmp2, clk_freq_hz[i]);
+
+ clk_period_ns = tmp1 * tmp2;
+
+ clk_delta_ns = abs(pwm_clk_period_ns
+ - clk_period_ns);
+ /*
+ * Find the closest setting for
+ * PWM frequency predivide value
+ */
+ if (clk_delta_ns < min_clk_delta_ns) {
+ min_clk_delta_ns
+ = clk_delta_ns;
+ configs[n].pwm_clk
+ = clk_freq_hz[i];
+ configs[n].prediv
+ = clk_prediv[j];
+ configs[n].clk_exp
+ = pwm_exponent[m];
+ configs[n].pwm_size
+ = pwm_size[n];
+ configs[n].best_period_ns
+ = clk_period_ns;
+ }
+ }
+ }
+ }
+
+ configs[n].best_period_ns *= 1 << pwm_size[n];
+ /* Find the closest setting for PWM period */
+ pwm_period_delta = min_clk_delta_ns << pwm_size[n];
+ if (pwm_period_delta < min_pwm_period_delta) {
+ min_pwm_period_delta = pwm_period_delta;
+ memcpy(pwm_config, &configs[n],
+ sizeof(struct lpg_pwm_config));
+ }
+ }
+
+ /* Larger PWM size can achieve better resolution for PWM duty */
+ for (n = ARRAY_SIZE(pwm_size) - 1; n > 0; n--) {
+ if (pwm_config->pwm_size >= pwm_size[n])
+ break;
+ pwm_size_step = pwm_size[n] - pwm_config->pwm_size;
+ if (pwm_config->clk_exp >= pwm_size_step) {
+ pwm_config->pwm_size = pwm_size[n];
+ pwm_config->clk_exp -= pwm_size_step;
+ }
+ }
+ pr_debug("PWM setting for period_ns %llu: pwm_clk = %dHZ, prediv = %d, exponent = %d, pwm_size = %d\n",
+ period_ns, pwm_config->pwm_clk, pwm_config->prediv,
+ pwm_config->clk_exp, pwm_config->pwm_size);
+ pr_debug("Actual period: %lluns\n", pwm_config->best_period_ns);
+}
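+
+/*
+ * Worked example of the period formula above (values chosen purely for
+ * illustration): clk_freq_hz = 19200000, prediv = 3, clk_exp = 6 and
+ * pwm_size = 9 give
+ *
+ *	(2^9) * (2^6) * 3 * NSEC_PER_SEC / 19200000 = 5120000 ns (~195 Hz),
+ *
+ * which is roughly what the search above settles on for a requested 5 ms
+ * period; the 9-bit pwm_value then provides 512 duty steps of ~10 us each.
+ */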
+
+static void __qpnp_lpg_calc_pwm_duty(u64 period_ns, u64 duty_ns,
+ struct lpg_pwm_config *pwm_config)
+{
+ u16 pwm_value, max_pwm_value;
+ u64 tmp;
+
+ tmp = (u64)duty_ns << pwm_config->pwm_size;
+ pwm_value = (u16)div64_u64(tmp, period_ns);
+
+ max_pwm_value = (1 << pwm_config->pwm_size) - 1;
+ if (pwm_value > max_pwm_value)
+ pwm_value = max_pwm_value;
+ pwm_config->pwm_value = pwm_value;
+}
+
+static int qpnp_lpg_config(struct qpnp_lpg_channel *lpg,
+ u64 duty_ns, u64 period_ns)
+{
+ int rc;
+
+ if (duty_ns > period_ns) {
+ dev_err(lpg->chip->dev, "Duty %lluns is larger than period %lluns\n",
+ duty_ns, period_ns);
+ return -EINVAL;
+ }
+
+ if (period_ns != lpg->current_period_ns) {
+ __qpnp_lpg_calc_pwm_period(period_ns, &lpg->pwm_config);
+
+ /* program LUT if PWM period is changed */
+ if (lpg->src_sel == LUT_PATTERN) {
+ rc = qpnp_lpg_set_lut_pattern(lpg,
+ lpg->ramp_config.pattern,
+ lpg->ramp_config.pattern_length);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return rc;
+ }
+ lpg->lut_written = true;
+ }
+ }
+
+ if (period_ns != lpg->current_period_ns ||
+ duty_ns != lpg->current_duty_ns)
+ __qpnp_lpg_calc_pwm_duty(period_ns, duty_ns, &lpg->pwm_config);
+
+ rc = qpnp_lpg_set_pwm_config(lpg);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return rc;
+ }
+
+ lpg->current_period_ns = period_ns;
+ lpg->current_duty_ns = duty_ns;
+
+ return rc;
+}
+
+static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ struct qpnp_lpg_channel *lpg;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ return qpnp_lpg_config(lpg, (u64)duty_ns, (u64)period_ns);
+}
+
+static int qpnp_lpg_pwm_config_extend(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, u64 duty_ns, u64 period_ns)
+{
+ struct qpnp_lpg_channel *lpg;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ return qpnp_lpg_config(lpg, duty_ns, period_ns);
+}
+
+static int qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en)
+{
+ struct qpnp_lpg_chip *chip = lpg->chip;
+ struct qpnp_lpg_lut *lut = chip->lut;
+ struct pwm_device *pwm;
+ u8 mask, val;
+ int i, lpg_idx, rc;
+
+ mask = LPG_PWM_SRC_SELECT_MASK | LPG_EN_LPG_OUT_BIT |
+ LPG_EN_RAMP_GEN_MASK;
+ val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
+
+ if (lpg->src_sel == LUT_PATTERN)
+ val |= 1 << LPG_EN_RAMP_GEN_SHIFT;
+
+ if (en)
+ val |= 1 << LPG_EN_LPG_OUT_SHIFT;
+
+ rc = qpnp_lpg_masked_write(lpg, REG_LPG_ENABLE_CONTROL, mask, val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Write LPG_ENABLE_CONTROL failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (lpg->src_sel == LUT_PATTERN && en) {
+ val = 1 << lpg->lpg_idx;
+ for (i = 0; i < chip->num_lpgs; i++) {
+ if (chip->lpg_group == NULL)
+ break;
+ if (chip->lpg_group[i] == 0)
+ break;
+ lpg_idx = chip->lpg_group[i] - 1;
+ pwm = &chip->pwm_chip.pwms[lpg_idx];
+ if ((pwm_get_output_type(pwm) == PWM_OUTPUT_MODULATED)
+ && pwm_is_enabled(pwm)) {
+ rc = qpnp_lpg_masked_write(&chip->lpgs[lpg_idx],
+ REG_LPG_ENABLE_CONTROL,
+ LPG_EN_LPG_OUT_BIT, 0);
+ if (rc < 0)
+ break;
+ rc = qpnp_lpg_masked_write(&chip->lpgs[lpg_idx],
+ REG_LPG_ENABLE_CONTROL,
+ LPG_EN_LPG_OUT_BIT,
+ LPG_EN_LPG_OUT_BIT);
+ if (rc < 0)
+ break;
+ val |= 1 << lpg_idx;
+ }
+ }
+ mutex_lock(&lut->lock);
+ rc = qpnp_lut_write(lut, REG_LPG_LUT_RAMP_CONTROL, val);
+ if (rc < 0)
+ dev_err(chip->dev, "Write LPG_LUT_RAMP_CONTROL failed, rc=%d\n",
+ rc);
+ mutex_unlock(&lut->lock);
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_pwm_set_output_type(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, enum pwm_output_type output_type)
+{
+ struct qpnp_lpg_channel *lpg;
+ enum lpg_src src_sel;
+ int rc;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ if (lpg->chip->lut == NULL) {
+ pr_debug("lpg%d only support PWM mode\n", lpg->lpg_idx);
+ return 0;
+ }
+
+ src_sel = (output_type == PWM_OUTPUT_MODULATED) ?
+ LUT_PATTERN : PWM_VALUE;
+ if (src_sel == lpg->src_sel)
+ return 0;
+
+ if (src_sel == LUT_PATTERN) {
+ /* program LUT if it's never been programmed */
+ if (!lpg->lut_written) {
+ rc = qpnp_lpg_set_lut_pattern(lpg,
+ lpg->ramp_config.pattern,
+ lpg->ramp_config.pattern_length);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return rc;
+ }
+ lpg->lut_written = true;
+ }
+
+ rc = qpnp_lpg_set_ramp_config(lpg);
+ if (rc < 0) {
+ dev_err(pwm_chip->dev, "Config LPG%d ramping failed, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return rc;
+ }
+ }
+
+ lpg->src_sel = src_sel;
+
+ if (pwm_is_enabled(pwm)) {
+ rc = qpnp_lpg_pwm_src_enable(lpg, true);
+ if (rc < 0) {
+ dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_lpg_pwm_set_output_pattern(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, struct pwm_output_pattern *output_pattern)
+{
+ struct qpnp_lpg_channel *lpg;
+ u64 period_ns, duty_ns, tmp;
+ u32 *percentages;
+ int rc = 0, i;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ if (output_pattern->num_entries > lpg->max_pattern_length) {
+ dev_err(lpg->chip->dev, "pattern length %d shouldn't exceed %d\n",
+ output_pattern->num_entries,
+ lpg->max_pattern_length);
+ return -EINVAL;
+ }
+
+ percentages = kcalloc(output_pattern->num_entries,
+ sizeof(u32), GFP_KERNEL);
+ if (!percentages)
+ return -ENOMEM;
+
+ period_ns = pwm_get_period_extend(pwm);
+ for (i = 0; i < output_pattern->num_entries; i++) {
+ duty_ns = output_pattern->duty_pattern[i];
+ if (duty_ns > period_ns) {
+ dev_err(lpg->chip->dev, "duty %lluns is larger than period %lluns\n",
+ duty_ns, period_ns);
+ goto err;
+ }
+ /* Translate the pattern in duty_ns to percentage */
+ tmp = (u64)duty_ns * 100;
+ percentages[i] = (u32)div64_u64(tmp, period_ns);
+ }
+
+ rc = qpnp_lpg_set_lut_pattern(lpg, percentages,
+ output_pattern->num_entries);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Set LUT pattern failed for LPG%d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ goto err;
+ }
+
+ lpg->lut_written = true;
+ memcpy(lpg->ramp_config.pattern, percentages,
+			output_pattern->num_entries * sizeof(u32));
+ lpg->ramp_config.hi_idx = lpg->ramp_config.lo_idx +
+ output_pattern->num_entries - 1;
+
+ tmp = (u64)output_pattern->cycles_per_duty * period_ns;
+ do_div(tmp, NSEC_PER_MSEC);
+ lpg->ramp_config.step_ms = (u16)tmp;
+
+ rc = qpnp_lpg_set_ramp_config(lpg);
+ if (rc < 0)
+ dev_err(pwm_chip->dev, "Config LPG%d ramping failed, rc=%d\n",
+ lpg->lpg_idx, rc);
+err:
+ kfree(percentages);
+
+ return rc;
+}
+
+static int qpnp_lpg_pwm_enable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_lpg_channel *lpg;
+ int rc = 0;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return -ENODEV;
+ }
+
+ rc = qpnp_lpg_set_glitch_removal(lpg, true);
+ if (rc < 0) {
+ dev_err(lpg->chip->dev, "Enable glitch-removal failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = qpnp_lpg_pwm_src_enable(lpg, true);
+ if (rc < 0)
+ dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+
+ return rc;
+}
+
+static void qpnp_lpg_pwm_disable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_lpg_channel *lpg;
+ int rc;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return;
+ }
+
+ rc = qpnp_lpg_pwm_src_enable(lpg, false);
+ if (rc < 0) {
+ dev_err(pwm_chip->dev, "Disable PWM output failed for channel %d, rc=%d\n",
+ lpg->lpg_idx, rc);
+ return;
+ }
+
+ rc = qpnp_lpg_set_glitch_removal(lpg, false);
+ if (rc < 0)
+ dev_err(lpg->chip->dev, "Disable glitch-removal failed, rc=%d\n",
+ rc);
+}
+
+static int qpnp_lpg_pwm_output_types_supported(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ enum pwm_output_type type = PWM_OUTPUT_FIXED;
+ struct qpnp_lpg_channel *lpg;
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return type;
+ }
+
+ if (lpg->chip->lut != NULL)
+ type |= PWM_OUTPUT_MODULATED;
+
+ return type;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void qpnp_lpg_pwm_dbg_show(struct pwm_chip *pwm_chip, struct seq_file *s)
+{
+ struct qpnp_lpg_channel *lpg;
+ struct lpg_pwm_config *cfg;
+ struct lpg_ramp_config *ramp;
+ struct pwm_device *pwm;
+ int i, j;
+
+ for (i = 0; i < pwm_chip->npwm; i++) {
+ pwm = &pwm_chip->pwms[i];
+
+ lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+ if (lpg == NULL) {
+ dev_err(pwm_chip->dev, "lpg not found\n");
+ return;
+ }
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ seq_printf(s, "LPG %d is requested by %s\n",
+ lpg->lpg_idx + 1, pwm->label);
+ } else {
+ seq_printf(s, "LPG %d is free\n",
+ lpg->lpg_idx + 1);
+ continue;
+ }
+
+ if (pwm_is_enabled(pwm)) {
+ seq_puts(s, " enabled\n");
+ } else {
+ seq_puts(s, " disabled\n");
+ continue;
+ }
+
+ cfg = &lpg->pwm_config;
+ seq_printf(s, " clk = %dHz\n", cfg->pwm_clk);
+ seq_printf(s, " pwm_size = %d\n", cfg->pwm_size);
+ seq_printf(s, " prediv = %d\n", cfg->prediv);
+ seq_printf(s, " exponent = %d\n", cfg->clk_exp);
+ seq_printf(s, " pwm_value = %d\n", cfg->pwm_value);
+ seq_printf(s, " Requested period: %lluns, best period = %lluns\n",
+ pwm_get_period_extend(pwm), cfg->best_period_ns);
+
+ ramp = &lpg->ramp_config;
+ if (pwm_get_output_type(pwm) == PWM_OUTPUT_MODULATED) {
+ seq_puts(s, " ramping duty percentages:");
+ for (j = 0; j < ramp->pattern_length; j++)
+ seq_printf(s, " %d", ramp->pattern[j]);
+ seq_puts(s, "\n");
+ seq_printf(s, " ramping time per step: %dms\n",
+ ramp->step_ms);
+ seq_printf(s, " ramping low index: %d\n",
+ ramp->lo_idx);
+ seq_printf(s, " ramping high index: %d\n",
+ ramp->hi_idx);
+ seq_printf(s, " ramping from low to high: %d\n",
+ ramp->ramp_dir_low_to_hi);
+ seq_printf(s, " ramping pattern repeat: %d\n",
+ ramp->pattern_repeat);
+ seq_printf(s, " ramping toggle: %d\n",
+ ramp->toggle);
+ seq_printf(s, " ramping pause count at low index: %d\n",
+ ramp->pause_lo_count);
+ seq_printf(s, " ramping pause count at high index: %d\n",
+ ramp->pause_hi_count);
+ }
+ }
+}
+#endif
+
+static const struct pwm_ops qpnp_lpg_pwm_ops = {
+ .config = qpnp_lpg_pwm_config,
+ .config_extend = qpnp_lpg_pwm_config_extend,
+ .get_output_type_supported = qpnp_lpg_pwm_output_types_supported,
+ .set_output_type = qpnp_lpg_pwm_set_output_type,
+ .set_output_pattern = qpnp_lpg_pwm_set_output_pattern,
+ .enable = qpnp_lpg_pwm_enable,
+ .disable = qpnp_lpg_pwm_disable,
+#ifdef CONFIG_DEBUG_FS
+ .dbg_show = qpnp_lpg_pwm_dbg_show,
+#endif
+ .owner = THIS_MODULE,
+};
+
+static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
+{
+ struct device_node *child;
+ struct qpnp_lpg_channel *lpg;
+ struct lpg_ramp_config *ramp;
+ int rc = 0, i;
+ u32 base, length, lpg_chan_id, tmp;
+ const __be32 *addr;
+
+ addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(chip->dev, "Get %s address failed\n", LPG_BASE);
+ return -EINVAL;
+ }
+
+ base = be32_to_cpu(addr[0]);
+ length = be32_to_cpu(addr[1]);
+
+ chip->num_lpgs = length / REG_SIZE_PER_LPG;
+ chip->lpgs = devm_kcalloc(chip->dev, chip->num_lpgs,
+ sizeof(*chip->lpgs), GFP_KERNEL);
+ if (!chip->lpgs)
+ return -ENOMEM;
+
+ for (i = 0; i < chip->num_lpgs; i++) {
+ chip->lpgs[i].chip = chip;
+ chip->lpgs[i].lpg_idx = i;
+ chip->lpgs[i].reg_base = base + i * REG_SIZE_PER_LPG;
+ chip->lpgs[i].src_sel = PWM_VALUE;
+ rc = qpnp_lpg_read(&chip->lpgs[i], REG_LPG_PERPH_SUBTYPE,
+ &chip->lpgs[i].subtype);
+ if (rc < 0) {
+ dev_err(chip->dev, "Read subtype failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ addr = of_get_address(chip->dev->of_node, 1, NULL, NULL);
+ if (!addr) {
+ pr_debug("NO LUT address assigned\n");
+ return 0;
+ }
+
+ chip->lut = devm_kmalloc(chip->dev, sizeof(*chip->lut), GFP_KERNEL);
+ if (!chip->lut)
+ return -ENOMEM;
+
+ chip->lut->chip = chip;
+ chip->lut->reg_base = be32_to_cpu(*addr);
+ mutex_init(&chip->lut->lock);
+
+ rc = of_property_count_elems_of_size(chip->dev->of_node,
+ "qcom,lut-patterns", sizeof(u32));
+ if (rc < 0) {
+ dev_err(chip->dev, "Read qcom,lut-patterns failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ length = rc;
+ if (length > LPG_LUT_COUNT_MAX) {
+ dev_err(chip->dev, "qcom,lut-patterns length %d exceed max %d\n",
+ length, LPG_LUT_COUNT_MAX);
+ return -EINVAL;
+ }
+
+ chip->lut->pattern = devm_kcalloc(chip->dev, LPG_LUT_COUNT_MAX,
+ sizeof(*chip->lut->pattern), GFP_KERNEL);
+ if (!chip->lut->pattern)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(chip->dev->of_node, "qcom,lut-patterns",
+ chip->lut->pattern, length);
+ if (rc < 0) {
+ dev_err(chip->dev, "Get qcom,lut-patterns failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (of_get_available_child_count(chip->dev->of_node) == 0) {
+ dev_err(chip->dev, "No ramp configuration for any LPG\n");
+ return -EINVAL;
+ }
+
+ for_each_available_child_of_node(chip->dev->of_node, child) {
+ rc = of_property_read_u32(child, "qcom,lpg-chan-id",
+ &lpg_chan_id);
+ if (rc < 0) {
+ dev_err(chip->dev, "Get qcom,lpg-chan-id failed for node %s, rc=%d\n",
+ child->name, rc);
+ return rc;
+ }
+
+ if (lpg_chan_id == 0 || lpg_chan_id > chip->num_lpgs) {
+ dev_err(chip->dev, "lpg-chan-id %d is out of range 1~%d\n",
+ lpg_chan_id, chip->num_lpgs);
+ return -EINVAL;
+ }
+
+ /* lpg channel id is indexed from 1 in hardware */
+ lpg = &chip->lpgs[lpg_chan_id - 1];
+ ramp = &lpg->ramp_config;
+
+ rc = of_property_read_u32(child, "qcom,ramp-step-ms", &tmp);
+ if (rc < 0) {
+ dev_err(chip->dev, "get qcom,ramp-step-ms failed for lpg%d, rc=%d\n",
+ lpg_chan_id, rc);
+ return rc;
+ }
+ ramp->step_ms = (u16)tmp;
+
+ rc = of_property_read_u32(child, "qcom,ramp-low-index", &tmp);
+ if (rc < 0) {
+ dev_err(chip->dev, "get qcom,ramp-low-index failed for lpg%d, rc=%d\n",
+ lpg_chan_id, rc);
+ return rc;
+ }
+ ramp->lo_idx = (u8)tmp;
+ if (ramp->lo_idx >= LPG_LUT_COUNT_MAX) {
+ dev_err(chip->dev, "qcom,ramp-low-index should less than max %d\n",
+ LPG_LUT_COUNT_MAX);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child, "qcom,ramp-high-index", &tmp);
+ if (rc < 0) {
+ dev_err(chip->dev, "get qcom,ramp-high-index failed for lpg%d, rc=%d\n",
+ lpg_chan_id, rc);
+ return rc;
+ }
+ ramp->hi_idx = (u8)tmp;
+
+ if (ramp->hi_idx >= LPG_LUT_COUNT_MAX) {
+ dev_err(chip->dev, "qcom,ramp-high-index should be less than max %d\n",
+ LPG_LUT_COUNT_MAX);
+ return -EINVAL;
+ }
+
+ if (ramp->hi_idx <= ramp->lo_idx) {
+ dev_err(chip->dev, "high-index(%d) should be larger than low-index(%d)\n",
+ ramp->hi_idx, ramp->lo_idx);
+ return -EINVAL;
+ }
+
+ ramp->pattern_length = ramp->hi_idx - ramp->lo_idx + 1;
+ ramp->pattern = &chip->lut->pattern[ramp->lo_idx];
+ lpg->max_pattern_length = ramp->pattern_length;
+
+ rc = of_property_read_u32(child,
+ "qcom,ramp-pause-hi-count", &tmp);
+ if (rc < 0)
+ ramp->pause_hi_count = 0;
+ else
+ ramp->pause_hi_count = (u8)tmp;
+
+ rc = of_property_read_u32(child,
+ "qcom,ramp-pause-lo-count", &tmp);
+ if (rc < 0)
+ ramp->pause_lo_count = 0;
+ else
+ ramp->pause_lo_count = (u8)tmp;
+
+ ramp->ramp_dir_low_to_hi = of_property_read_bool(child,
+ "qcom,ramp-from-low-to-high");
+
+ ramp->pattern_repeat = of_property_read_bool(child,
+ "qcom,ramp-pattern-repeat");
+
+ ramp->toggle = of_property_read_bool(child,
+ "qcom,ramp-toggle");
+ }
+
+ rc = of_property_count_elems_of_size(chip->dev->of_node,
+ "qcom,sync-channel-ids", sizeof(u32));
+ if (rc < 0)
+ return 0;
+
+ length = rc;
+ if (length > chip->num_lpgs) {
+ dev_err(chip->dev, "qcom,sync-channel-ids has too many channels: %d\n",
+ length);
+ return -EINVAL;
+ }
+
+ chip->lpg_group = devm_kcalloc(chip->dev, chip->num_lpgs,
+ sizeof(u32), GFP_KERNEL);
+ if (!chip->lpg_group)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(chip->dev->of_node,
+ "qcom,sync-channel-ids", chip->lpg_group, length);
+ if (rc < 0) {
+ dev_err(chip->dev, "Get qcom,sync-channel-ids failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < length; i++) {
+ if (chip->lpg_group[i] == 0 ||
+ chip->lpg_group[i] > chip->num_lpgs) {
+ dev_err(chip->dev, "lpg_group[%d]: %d is not a valid channel\n",
+ i, chip->lpg_group[i]);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * LPG channels in the same group should have the same ramping
+ * configuration, so force them to use the ramping configuration of
+ * the 1st LPG channel in the group for synchronization.
+ */
+ lpg = &chip->lpgs[chip->lpg_group[0] - 1];
+ ramp = &lpg->ramp_config;
+
+ for (i = 1; i < length; i++) {
+ lpg = &chip->lpgs[chip->lpg_group[i] - 1];
+ memcpy(&lpg->ramp_config, ramp, sizeof(struct lpg_ramp_config));
+ }
+
+ return 0;
+}
+
+static int qpnp_lpg_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct qpnp_lpg_chip *chip;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ dev_err(chip->dev, "Getting regmap failed\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&chip->bus_lock);
+ rc = qpnp_lpg_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ dev_set_drvdata(chip->dev, chip);
+ chip->pwm_chip.dev = chip->dev;
+ chip->pwm_chip.base = -1;
+ chip->pwm_chip.npwm = chip->num_lpgs;
+ chip->pwm_chip.ops = &qpnp_lpg_pwm_ops;
+
+ rc = pwmchip_add(&chip->pwm_chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Add pwmchip failed, rc=%d\n", rc);
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ mutex_destroy(&chip->bus_lock);
+ return rc;
+}
+
+static int qpnp_lpg_remove(struct platform_device *pdev)
+{
+ struct qpnp_lpg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = pwmchip_remove(&chip->pwm_chip);
+ if (rc < 0)
+ dev_err(chip->dev, "Remove pwmchip failed, rc=%d\n", rc);
+
+ mutex_destroy(&chip->bus_lock);
+ dev_set_drvdata(chip->dev, NULL);
+
+ return rc;
+}
+
+static const struct of_device_id qpnp_lpg_of_match[] = {
+ { .compatible = "qcom,pwm-lpg",},
+ { },
+};
+
+static struct platform_driver qpnp_lpg_driver = {
+ .driver = {
+ .name = "qcom,pwm-lpg",
+ .of_match_table = qpnp_lpg_of_match,
+ },
+ .probe = qpnp_lpg_probe,
+ .remove = qpnp_lpg_remove,
+};
+module_platform_driver(qpnp_lpg_driver);
+
+MODULE_DESCRIPTION("QTI LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("pwm:pwm-lpg");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a352fd4..3e16c6b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2967,6 +2967,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
+ /* handle any pending hub events before XHCI stops */
+ usb_flush_hub_wq();
+
mutex_lock(&usb_bus_idr_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 462ce49..a81a627 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -641,6 +641,12 @@ void usb_kick_hub_wq(struct usb_device *hdev)
kick_hub_wq(hub);
}
+void usb_flush_hub_wq(void)
+{
+ flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 9e3e83a..51f20e6 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -518,6 +518,8 @@ static void qdss_unbind(struct usb_configuration *c, struct usb_function *f)
flush_workqueue(qdss->wq);
+ qdss->debug_inface_enabled = 0;
+
clear_eps(f);
clear_desc(gadget, f);
}
@@ -1092,7 +1094,7 @@ static ssize_t qdss_enable_debug_inface_store(struct config_item *item,
}
spin_lock_irqsave(&qdss->lock, flags);
- qdss->debug_inface_enabled = (stats == 1 ? "true" : "false");
+ qdss->debug_inface_enabled = (stats == 1 ? true : false);
spin_unlock_irqrestore(&qdss->lock, flags);
return len;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c0df8cf..b263ee9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -727,8 +727,6 @@ static void xhci_stop(struct usb_hcd *hcd)
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
- /* usb core will free this hcd shortly, unset pointer */
- xhci->shared_hcd = NULL;
mutex_unlock(&xhci->mutex);
return;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8bee4cb..0a211f1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -437,8 +437,8 @@ struct xhci_op_regs {
#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
#define PORT_BESLD(p)(((p) & 0xf) << 10)
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT 512
+/* use 128 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 128
/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
* Safe to use with mixed HIRD and BESL systems (host and device) and is used
diff --git a/include/dt-bindings/input/qcom,qpnp-power-on.h b/include/dt-bindings/input/qcom,qpnp-power-on.h
new file mode 100644
index 0000000..e940d4c
--- /dev/null
+++ b/include/dt-bindings/input/qcom,qpnp-power-on.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_INPUT_QCOM_POWER_ON_H
+#define _DT_BINDINGS_INPUT_QCOM_POWER_ON_H
+
+/* PMIC PON peripheral logical power on types: */
+#define PON_POWER_ON_TYPE_KPDPWR 0
+#define PON_POWER_ON_TYPE_RESIN 1
+#define PON_POWER_ON_TYPE_CBLPWR 2
+#define PON_POWER_ON_TYPE_KPDPWR_RESIN 3
+
+/* PMIC PON peripheral physical power off types: */
+#define PON_POWER_OFF_TYPE_WARM_RESET 0x01
+#define PON_POWER_OFF_TYPE_SHUTDOWN 0x04
+#define PON_POWER_OFF_TYPE_DVDD_SHUTDOWN 0x05
+#define PON_POWER_OFF_TYPE_HARD_RESET 0x07
+#define PON_POWER_OFF_TYPE_DVDD_HARD_RESET 0x08
+
+#endif
diff --git a/include/linux/batterydata-lib.h b/include/linux/batterydata-lib.h
new file mode 100644
index 0000000..43e4b62
--- /dev/null
+++ b/include/linux/batterydata-lib.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __BMS_BATTERYDATA_H
+#define __BMS_BATTERYDATA_H
+
+#include <linux/errno.h>
+
+#define FCC_CC_COLS 5
+#define FCC_TEMP_COLS 8
+
+#define PC_CC_ROWS 31
+#define PC_CC_COLS 13
+
+#define PC_TEMP_ROWS 31
+#define PC_TEMP_COLS 8
+
+#define ACC_IBAT_ROWS 4
+#define ACC_TEMP_COLS 3
+
+#define MAX_SINGLE_LUT_COLS 20
+
+#define MAX_BATT_ID_NUM 4
+#define DEGC_SCALE 10
+
+struct single_row_lut {
+ int x[MAX_SINGLE_LUT_COLS];
+ int y[MAX_SINGLE_LUT_COLS];
+ int cols;
+};
+
+/**
+ * struct sf_lut -
+ * @rows: number of percent charge entries should be <= PC_CC_ROWS
+ * @cols: number of charge cycle entries should be <= PC_CC_COLS
+ * @row_entries: the charge cycles/temperature at which sf data
+ * is available in the table.
+ * The charge cycles must be in increasing order from 0 to rows.
+ * @percent: the percent charge at which sf data is available in the table
+ * The percent charge must be in decreasing order from 0 to cols.
+ * @sf: the scaling factor data
+ */
+struct sf_lut {
+ int rows;
+ int cols;
+ int row_entries[PC_CC_COLS];
+ int percent[PC_CC_ROWS];
+ int sf[PC_CC_ROWS][PC_CC_COLS];
+};
+
+/**
+ * struct pc_temp_ocv_lut -
+ * @rows: number of percent charge entries should be <= PC_TEMP_ROWS
+ * @cols: number of temperature entries should be <= PC_TEMP_COLS
+ * @temp: the temperatures at which ocv data is available in the table
+ * The temperatures must be in increasing order from 0 to rows.
+ * @percent: the percent charge at which ocv data is available in the table
+ * The percent charge must be in decreasing order from 0 to cols.
+ * @ocv: the open circuit voltage
+ */
+struct pc_temp_ocv_lut {
+ int rows;
+ int cols;
+ int temp[PC_TEMP_COLS];
+ int percent[PC_TEMP_ROWS];
+ int ocv[PC_TEMP_ROWS][PC_TEMP_COLS];
+};
+
+struct ibat_temp_acc_lut {
+ int rows;
+ int cols;
+ int temp[ACC_TEMP_COLS];
+ int ibat[ACC_IBAT_ROWS];
+ int acc[ACC_IBAT_ROWS][ACC_TEMP_COLS];
+};
+
+struct batt_ids {
+ int kohm[MAX_BATT_ID_NUM];
+ int num;
+};
+
+enum battery_type {
+ BATT_UNKNOWN = 0,
+ BATT_PALLADIUM,
+ BATT_DESAY,
+ BATT_OEM,
+ BATT_QRD_4V35_2000MAH,
+ BATT_QRD_4V2_1300MAH,
+};
+
+/**
+ * struct bms_battery_data -
+ * @fcc: full charge capacity (mAmpHour)
+ * @fcc_temp_lut: table to get fcc at a given temp
+ * @pc_temp_ocv_lut: table to get percent charge given batt temp and cycles
+ * @pc_sf_lut: table to get percent charge scaling factor given cycles
+ * and percent charge
+ * @rbatt_sf_lut: table to get battery resistance scaling factor given
+ * temperature and percent charge
+ * @default_rbatt_mohm: the default value of battery resistance to use when
+ * readings from bms are not available.
+ * @delta_rbatt_mohm: the resistance to be added towards lower soc to
+ * compensate for battery capacitance.
+ * @rbatt_capacitive_mohm: the resistance to be added to compensate for
+ * battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ * starts flattening out.
+ * @max_voltage_uv: max voltage of the battery
+ * @cutoff_uv: cutoff voltage of the battery
+ * @iterm_ua: termination current of the battery when charging
+ * to 100%
+ * @batt_id_kohm: the best matched battery id resistor value
+ * @fastchg_current_ma: maximum fast charge current
+ * @fg_cc_cv_threshold_mv: CC to CV threshold voltage
+ */
+
+struct bms_battery_data {
+ unsigned int fcc;
+ struct single_row_lut *fcc_temp_lut;
+ struct single_row_lut *fcc_sf_lut;
+ struct pc_temp_ocv_lut *pc_temp_ocv_lut;
+ struct ibat_temp_acc_lut *ibat_acc_lut;
+ struct sf_lut *pc_sf_lut;
+ struct sf_lut *rbatt_sf_lut;
+ int default_rbatt_mohm;
+ int delta_rbatt_mohm;
+ int rbatt_capacitive_mohm;
+ int flat_ocv_threshold_uv;
+ int max_voltage_uv;
+ int cutoff_uv;
+ int iterm_ua;
+ int batt_id_kohm;
+ int fastchg_current_ma;
+ int fg_cc_cv_threshold_mv;
+ const char *battery_type;
+};
+
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
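+
+/*
+ * Usage note (illustrative): is_between() treats both boundaries as inclusive
+ * and accepts them in either order, e.g.
+ *	is_between(10, 0, 5)  evaluates to true  (5 lies within [0, 10])
+ *	is_between(0, 10, 5)  evaluates to true  (boundaries reversed)
+ *	is_between(0, 10, 15) evaluates to false (15 is outside the range)
+ */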
+
+#if defined(CONFIG_PM8921_BMS) || \
+ defined(CONFIG_PM8921_BMS_MODULE) || \
+ defined(CONFIG_QPNP_BMS) || \
+ defined(CONFIG_QPNP_VM_BMS)
+extern struct bms_battery_data palladium_1500_data;
+extern struct bms_battery_data desay_5200_data;
+extern struct bms_battery_data oem_batt_data;
+extern struct bms_battery_data QRD_4v35_2000mAh_data;
+extern struct bms_battery_data qrd_4v2_1300mah_data;
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+ int cycles);
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int ocv);
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int pc);
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc);
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+ int batt_temp, int ibat);
+int linear_interpolate(int y0, int x0, int y1, int x1, int x);
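+
+/*
+ * Worked example (illustrative, assuming the conventional definition of
+ * linear interpolation): linear_interpolate(y0, x0, y1, x1, x) evaluates the
+ * straight line through (x0, y0) and (x1, y1) at x:
+ *
+ *	y = y0 + (y1 - y0) * (x - x0) / (x1 - x0)
+ *
+ * e.g. linear_interpolate(3600, 20, 3700, 30, 25) = 3600 + 100 * 5 / 10 = 3650
+ */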
+#else
+static inline int interpolate_fcc(struct single_row_lut *fcc_temp_lut,
+ int batt_temp)
+{
+ return -EINVAL;
+}
+static inline int interpolate_scalingfactor(struct sf_lut *sf_lut,
+ int row_entry, int pc)
+{
+ return -EINVAL;
+}
+static inline int interpolate_scalingfactor_fcc(
+ struct single_row_lut *fcc_sf_lut, int cycles)
+{
+ return -EINVAL;
+}
+static inline int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int ocv)
+{
+ return -EINVAL;
+}
+static inline int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int pc)
+{
+ return -EINVAL;
+}
+static inline int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc)
+{
+ return -EINVAL;
+}
+static inline int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+ return -EINVAL;
+}
+static inline int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+ int batt_temp, int ibat)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
new file mode 100644
index 0000000..f2d2d1b
--- /dev/null
+++ b/include/linux/msm_pcie.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.*/
+
+#ifndef __MSM_PCIE_H
+#define __MSM_PCIE_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+enum msm_pcie_config {
+ MSM_PCIE_CONFIG_INVALID = 0,
+ MSM_PCIE_CONFIG_NO_CFG_RESTORE = 0x1,
+ MSM_PCIE_CONFIG_LINKDOWN = 0x2,
+ MSM_PCIE_CONFIG_NO_RECOVERY = 0x4,
+};
+
+enum msm_pcie_pm_opt {
+ MSM_PCIE_SUSPEND,
+ MSM_PCIE_RESUME,
+ MSM_PCIE_DISABLE_PC,
+ MSM_PCIE_ENABLE_PC,
+};
+
+enum msm_pcie_event {
+ MSM_PCIE_EVENT_INVALID = 0,
+ MSM_PCIE_EVENT_LINKDOWN = 0x1,
+ MSM_PCIE_EVENT_LINKUP = 0x2,
+ MSM_PCIE_EVENT_WAKEUP = 0x4,
+};
+
+enum msm_pcie_trigger {
+ MSM_PCIE_TRIGGER_CALLBACK,
+ MSM_PCIE_TRIGGER_COMPLETION,
+};
+
+struct msm_pcie_notify {
+ enum msm_pcie_event event;
+ void *user;
+ void *data;
+ u32 options;
+};
+
+struct msm_pcie_register_event {
+ u32 events;
+ void *user;
+ enum msm_pcie_trigger mode;
+ void (*callback)(struct msm_pcie_notify *notify);
+ struct msm_pcie_notify notify;
+ struct completion *completion;
+ u32 options;
+};
+
+#ifdef CONFIG_PCI_MSM_MSI
+int msm_msi_init(struct device *dev);
+#else
+static inline int msm_msi_init(struct device *dev)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_PCI_MSM
+/**
+ * msm_pcie_pm_control - control the power state of a PCIe link.
+ * @pm_opt: power management operation
+ * @busnr: bus number of PCIe endpoint
+ * @user: handle of the caller
+ * @data: private data from the caller
+ * @options: options for pm control
+ *
+ * This function gives PCIe endpoint device drivers the control to change
+ * the power state of a PCIe link for their device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+ void *data, u32 options);
+
+/**
+ * msm_pcie_register_event - register an event with PCIe bus driver.
+ * @reg: event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to register
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_register_event(struct msm_pcie_register_event *reg);
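+
+/*
+ * Illustrative usage sketch (client code, not part of this API definition):
+ * an endpoint driver registers for link-down notifications on its pci_dev.
+ * "pdev" and "my_linkdown_cb" are hypothetical names.
+ *
+ *	static struct msm_pcie_register_event pcie_event;
+ *
+ *	pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ *	pcie_event.user = pdev;
+ *	pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+ *	pcie_event.callback = my_linkdown_cb;
+ *	rc = msm_pcie_register_event(&pcie_event);
+ */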
+
+/**
+ * msm_pcie_deregister_event - deregister an event with PCIe bus driver.
+ * @reg: event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to deregister
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg);
+
+/**
+ * msm_pcie_recover_config - recover config space.
+ * @dev: pci device structure
+ *
+ * This function recovers the config space of both RC and Endpoint.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_recover_config(struct pci_dev *dev);
+
+/**
+ * msm_pcie_enumerate - enumerate Endpoints.
+ * @rc_idx: RC that Endpoints connect to.
+ *
+ * This function enumerates Endpoints connected to RC.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_enumerate(u32 rc_idx);
+
+/**
+ * msm_pcie_shadow_control - control the shadowing of PCIe config space.
+ * @dev: pci device structure
+ * @enable: shadowing should be enabled or disabled
+ *
+ * This function gives PCIe endpoint device drivers the control to enable
+ * or disable the shadowing of PCIe config space.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable);
+
+/**
+ * msm_pcie_debug_info - run a PCIe specific debug testcase.
+ * @dev: pci device structure
+ * @option: specifies which PCIe debug testcase to execute
+ * @base: PCIe specific range
+ * @offset: offset of destination register
+ * @mask: mask the bit(s) of destination register
+ * @value: value to be written to destination register
+ *
+ * This function gives PCIe endpoint device drivers the control to
+ * run a debug testcase.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+ u32 offset, u32 mask, u32 value);
+
+#else /* !CONFIG_PCI_MSM */
+static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
+ void *user, void *data, u32 options)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_recover_config(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_enumerate(u32 rc_idx)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+ return -ENODEV;
+}
+
+static inline int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+ u32 offset, u32 mask, u32 value)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PCI_MSM */
+
+#endif /* __MSM_PCIE_H */
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
new file mode 100644
index 0000000..0faf7f8
--- /dev/null
+++ b/include/linux/of_batterydata.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/batterydata-lib.h>
+
+#ifdef CONFIG_OF_BATTERYDATA
+/**
+ * of_batterydata_read_data() - Populate battery data from the device tree
+ * @container_node: pointer to the battery-data container device node
+ * containing the profile nodes.
+ * @batt_data: pointer to an allocated bms_battery_data structure that the
+ * loaded profile will be written to.
+ * @batt_id_uv: ADC voltage of the battery id line used to differentiate
+ * between different battery profiles. If there are multiple
+ * battery data profiles in the device tree, the one with the
+ * closest battery id resistance will be automatically loaded.
+ *
+ * This routine loads the closest match battery data from device tree based on
+ * the battery id reading. Then, it will try to load all the relevant data from
+ * the device tree battery data profile.
+ *
+ * If any of the lookup table pointers are NULL, this routine will skip trying
+ * to read them.
+ */
+int of_batterydata_read_data(struct device_node *container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv);
+/**
+ * of_batterydata_get_best_profile() - Find matching battery data device node
+ * @batterydata_container_node: pointer to the battery-data container device
+ * node containing the profile nodes.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
+ * @batt_type: Battery type for which we want to force-load the profile.
+ *
+ * This routine returns a device_node pointer to the closest match battery data
+ * from device tree based on the battery id reading.
+ */
+struct device_node *of_batterydata_get_best_profile(
+ struct device_node *batterydata_container_node,
+ int batt_id_kohm, const char *batt_type);
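+
+/*
+ * Illustrative usage sketch (parameter values are examples only). Passing
+ * NULL for batt_type is assumed here to select the profile purely by the
+ * closest battery id resistance.
+ *
+ *	struct device_node *profile;
+ *
+ *	profile = of_batterydata_get_best_profile(batterydata_node,
+ *						batt_id_kohm, NULL);
+ *	if (IS_ERR_OR_NULL(profile))
+ *		... no usable profile was found ...
+ */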
+#else
+static inline int of_batterydata_read_data(struct device_node *container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv)
+{
+ return -ENXIO;
+}
+static inline struct device_node *of_batterydata_get_best_profile(
+ struct device_node *batterydata_container_node,
+ int batt_id_kohm, const char *batt_type)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF_BATTERYDATA */
diff --git a/include/linux/pmic-voter.h b/include/linux/pmic-voter.h
new file mode 100644
index 0000000..9a783ce
--- /dev/null
+++ b/include/linux/pmic-voter.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __PMIC_VOTER_H
+#define __PMIC_VOTER_H
+
+#include <linux/mutex.h>
+
+struct votable;
+
+enum votable_type {
+ VOTE_MIN,
+ VOTE_MAX,
+ VOTE_SET_ANY,
+ NUM_VOTABLE_TYPES,
+};
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str);
+int get_client_vote(struct votable *votable, const char *client_str);
+int get_client_vote_locked(struct votable *votable, const char *client_str);
+int get_effective_result(struct votable *votable);
+int get_effective_result_locked(struct votable *votable);
+const char *get_effective_client(struct votable *votable);
+const char *get_effective_client_locked(struct votable *votable);
+int vote(struct votable *votable, const char *client_str, bool state, int val);
+int rerun_election(struct votable *votable);
+struct votable *find_votable(const char *name);
+struct votable *create_votable(const char *name,
+ int votable_type,
+ int (*callback)(struct votable *votable,
+ void *data,
+ int effective_result,
+ const char *effective_client),
+ void *data);
+void destroy_votable(struct votable *votable);
+void lock_votable(struct votable *votable);
+void unlock_votable(struct votable *votable);
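+
+/*
+ * Illustrative usage sketch (names and values are examples only): a charger
+ * driver creates a VOTE_MIN votable for its input current limit, clients
+ * vote, and the callback receives the effective (minimum) result.
+ *
+ *	static int icl_vote_cb(struct votable *votable, void *data,
+ *			int effective_result, const char *effective_client)
+ *	{
+ *		... apply effective_result as the new current limit ...
+ *		return 0;
+ *	}
+ *
+ *	chip->icl_votable = create_votable("ICL", VOTE_MIN, icl_vote_cb, chip);
+ *	vote(chip->icl_votable, "USB", true, 1500000);
+ *	vote(chip->icl_votable, "THERMAL", true, 1000000);
+ *
+ * get_effective_result(chip->icl_votable) would then return 1000000.
+ */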
+
+#endif /* __PMIC_VOTER_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 97e2dde..abf9c92 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -497,7 +497,7 @@ extern void usb_hc_died(struct usb_hcd *hcd);
extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
extern void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index bede892..751aa08 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -405,6 +405,8 @@ struct drm_msm_submitqueue {
#define DRM_EVENT_IDLE_NOTIFY 0x80000005
#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */
#define DRM_EVENT_SDE_HW_RECOVERY 0X80000007
+#define DRM_EVENT_LTM_HIST 0X80000008
+#define DRM_EVENT_LTM_WB_PB 0X80000009
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index cf22867..24b9895 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -460,4 +460,80 @@ struct drm_msm_ad4_roi_cfg {
__u32 factor_in;
__u32 factor_out;
};
+
+#define LTM_DATA_SIZE_0 32
+#define LTM_DATA_SIZE_1 128
+#define LTM_DATA_SIZE_2 256
+#define LTM_DATA_SIZE_3 33
+#define LTM_BUFFER_SIZE 5
+
+#define LTM_STATS_SAT (1 << 1)
+#define LTM_STATS_MERGE_SAT (1 << 2)
+
+/*
+ * struct drm_msm_ltm_stats_data - LTM stats data structure
+ */
+struct drm_msm_ltm_stats_data {
+ __u32 stats_01[LTM_DATA_SIZE_0][LTM_DATA_SIZE_1];
+ __u32 stats_02[LTM_DATA_SIZE_2];
+ __u32 stats_03[LTM_DATA_SIZE_0];
+ __u32 stats_04[LTM_DATA_SIZE_0];
+ __u32 stats_05[LTM_DATA_SIZE_0];
+ __u32 status_flag;
+};
+
+/*
+ * struct drm_msm_ltm_init_param - LTM init param structure
+ */
+struct drm_msm_ltm_init_param {
+ __u32 init_param_01;
+ __u32 init_param_02;
+ __u32 init_param_03;
+ __u32 init_param_04;
+};
+
+/*
+ * struct drm_msm_ltm_cfg_param - LTM config param structure
+ */
+struct drm_msm_ltm_cfg_param {
+ __u32 cfg_param_01;
+ __u32 cfg_param_02;
+ __u32 cfg_param_03;
+ __u32 cfg_param_04;
+ __u32 cfg_param_05;
+ __u32 cfg_param_06;
+};
+
+/*
+ * struct drm_msm_ltm_data - LTM data structure
+ */
+struct drm_msm_ltm_data {
+ __u32 data[LTM_DATA_SIZE_0][LTM_DATA_SIZE_3];
+};
+
+/*
+ * struct drm_msm_ltm_buffers_ctrl - LTM buffer control structure.
+ * This struct will be used to init and
+ * de-init the LTM buffers in the driver.
+ * @num_of_buffers: valid number of buffers used
+ * @fds: fd array for all the valid buffers
+ */
+struct drm_msm_ltm_buffers_ctrl {
+ __u32 num_of_buffers;
+ __u32 fds[LTM_BUFFER_SIZE];
+};
+
+/*
+ * struct drm_msm_ltm_buffer - LTM buffer structure.
+ * This struct will be passed from driver to user
+ * space for LTM stats data notification.
+ * @fd: fd associated with the buffer that has LTM stats data
+ * @offset: offset from base address that is used for alignment
+ * @status: status flag for error indication
+ */
+struct drm_msm_ltm_buffer {
+ __u32 fd;
+ __u32 offset;
+ __u32 status;
+};
#endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
new file mode 100644
index 0000000..785a435
--- /dev/null
+++ b/include/uapi/media/msm_media_info.h
@@ -0,0 +1,1345 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __MSM_MEDIA_INFO_H__
+#define __MSM_MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
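+
+/*
+ * Examples (illustrative):
+ *	MSM_MEDIA_ALIGN(1080, 32)   = 1088 (power-of-two alignment, mask form)
+ *	MSM_MEDIA_ALIGN(100, 24)    = 120  (non power-of-two, divide/multiply form)
+ *	MSM_MEDIA_ROUNDUP(1080, 96) = 12   (division rounded up: 1080/96 = 11.25)
+ */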
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines, 4096)
+ */
+ COLOR_FMT_NV12,
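+ /*
+ * Worked example for COLOR_FMT_NV12 (illustrative), Width = 1920,
+ * Height = 1080, using the formulas above:
+ *	Y_Stride     = align(1920, 128) = 1920
+ *	UV_Stride    = align(1920, 128) = 1920
+ *	Y_Scanlines  = align(1080, 32)  = 1088
+ *	UV_Scanlines = align(540, 16)   = 544
+ *	Total size   = align(1920 * 1088 + 1920 * 544, 4096)
+ *	             = align(3133440, 4096) = 3133440 bytes
+ */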
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines, 4096)
+ */
+ COLOR_FMT_NV21,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size, 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 4/3, 128)
+ * UV_Stride = align(Width * 4/3, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ *
+ * Total size = align(RGB_Plane_size, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 128)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
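+ *
+ * Worked example (illustrative): for a 1920x1080 frame, as computed by the
+ * VENUS_* helper functions below (align() rounding up to the given
+ * multiple, roundup() as a ceiling division):
+ *   Y_Stride = 3840, Y_Scanlines = 1088, Y_UBWC_Plane_Size = 4177920
+ *   UV_Stride = 3840, UV_Scanlines = 544, UV_UBWC_Plane_Size = 2088960
+ *   Y_Meta_Plane_size = 20480, UV_Meta_Plane_size = 12288
+ *   Total size = align(4177920 + 2088960 + 20480 + 12288, 4096)
+ *              = 6299648 bytes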
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 256
+ * UV_Stride : Width * 2 aligned to 256
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines, 4096)
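+ *
+ * Worked example (illustrative), assuming align() rounds up to the given
+ * multiple: for a 1920x1080 frame,
+ *   Y_Stride = UV_Stride = align(1920 * 2, 256) = 3840
+ *   Y_Scanlines = align(1080, 32) = 1088
+ *   UV_Scanlines = align(540, 16) = 544
+ *   Total size = align(3840 * 1088 + 3840 * 544, 4096) = 6266880 bytes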
+ */
+ COLOR_FMT_P010,
+ /* Venus NV12_512:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 512
+ * UV_Stride : Width aligned to 512
+ * Y_Scanlines: Height aligned to 512
+ * UV_Scanlines: Height/2 aligned to 256
+ * Total size = align((Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines), 4096)
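+ *
+ * Worked example (illustrative), assuming align() rounds up to the given
+ * multiple: for a 1920x1080 frame,
+ *   Y_Stride = UV_Stride = align(1920, 512) = 2048
+ *   Y_Scanlines = align(1080, 512) = 1536
+ *   UV_Scanlines = align(540, 256) = 768
+ *   Total size = align(2048 * 1536 + 2048 * 768, 4096) = 4718592 bytes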
+ */
+ COLOR_FMT_NV12_512,
+};
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_512:
+ alignment = 512;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
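+		/*
+		 * The 10-bit samples are packed three to every four bytes, so
+		 * the pixel width is first aligned to 192 and the resulting
+		 * byte width (4/3 of the pixel width) is then aligned to 256.
+		 */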
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
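+
+/*
+ * Example (illustrative): for an NV12 frame 1080 pixels wide,
+ * VENUS_Y_STRIDE(COLOR_FMT_NV12, 1080) returns
+ * MSM_MEDIA_ALIGN(1080, 128) = 1152 bytes, assuming MSM_MEDIA_ALIGN()
+ * rounds up to the given multiple.
+ */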
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_512:
+ alignment = 512;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_512:
+ alignment = 512;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+ return 0;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_512:
+ alignment = 256;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
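+
+/*
+ * Example (illustrative): for COLOR_FMT_NV12_UBWC at width 1920 the Y tile
+ * width is 32, so this returns
+ * MSM_MEDIA_ALIGN(MSM_MEDIA_ROUNDUP(1920, 32), 64) = MSM_MEDIA_ALIGN(60, 64)
+ * = 64, assuming MSM_MEDIA_ROUNDUP() is a ceiling division and
+ * MSM_MEDIA_ALIGN() rounds up to the given multiple.
+ */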
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(unsigned int color_fmt,
+ unsigned int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(unsigned int color_fmt,
+ unsigned int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(unsigned int color_fmt,
+ unsigned int width, unsigned int height)
+{
+ unsigned int size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ case COLOR_FMT_NV12_512:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ size = y_plane + uv_plane;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
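+		/*
+		 * Resolutions up to 1920x1088 (in either orientation) are
+		 * sized per field and the result is doubled so that two
+		 * fields of interlaced content also fit; larger resolutions
+		 * are sized as a single progressive frame.
+		 */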
+ if ((width <= 1920 && height <= 1088) ||
+ (width <= 1088 && height <= 1920)) {
+ y_sclines =
+ VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane =
+ MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines =
+ VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane =
+ MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane)*2;
+ } else {
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ y_ubwc_plane =
+ MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ uv_ubwc_plane =
+ MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane);
+ }
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return MSM_MEDIA_ALIGN(size, 4096);
+}
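+
+/*
+ * Example (illustrative): VENUS_BUFFER_SIZE(COLOR_FMT_NV12, 1920, 1080)
+ * computes y_stride = 1920, y_sclines = 1088, uv_stride = 1920 and
+ * uv_sclines = 544, so it returns
+ * MSM_MEDIA_ALIGN(1920 * 1088 + 1920 * 544, 4096) = 3133440 bytes.
+ */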
+
+static inline unsigned int VENUS_BUFFER_SIZE_USED(unsigned int color_fmt,
+ unsigned int width, unsigned int height, unsigned int interlace)
+{
+ unsigned int size = 0;
+ unsigned int y_stride, uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ if (!interlace && color_fmt == COLOR_FMT_NV12_UBWC) {
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ } else {
+ size = VENUS_BUFFER_SIZE(color_fmt, width, height);
+ }
+invalid_input:
+ return size;
+}
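+
+/*
+ * Note: for progressive NV12 UBWC content (interlace == 0) this returns the
+ * size of a single frame, whereas VENUS_BUFFER_SIZE() doubles the per-field
+ * size for resolutions up to 1920x1088; every other case falls back to
+ * VENUS_BUFFER_SIZE().
+ */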
+
+#endif
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 5a584cd..c10bd82 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -696,7 +696,8 @@ static int usb_audio_probe(struct usb_interface *intf,
if (chip) {
if (!chip->num_interfaces)
snd_card_free(chip->card);
- atomic_dec(&chip->active);
+ else
+ atomic_dec(&chip->active);
}
 	mutex_unlock(&register_mutex);
return err;
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 7712e2b..4783648 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -122,7 +122,7 @@ unsigned char snd_usb_parse_datainterval(struct snd_usb_audio *chip,
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
if (get_endpoint(alts, 0)->bInterval >= 1 &&
- get_endpoint(alts, 0)->bInterval <= 4)
+ get_endpoint(alts, 0)->bInterval <= 16)
return get_endpoint(alts, 0)->bInterval - 1;
break;
default: