Merge "soc: qcom: socinfo: Add support for SDM632 soc-id"
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm_master_stats.txt b/Documentation/devicetree/bindings/arm/msm/rpm_master_stats.txt
new file mode 100644
index 0000000..26a4396
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm_master_stats.txt
@@ -0,0 +1,47 @@
+* RPM Stats
+
+RPM maintains, per master (i.e. APPS, MPSS etc.), a counter of the
+number of times the SoC entered a deeper sleep mode involving
+lowering or powering down the backbone rails - Cx and Mx - and
+the oscillator clock, XO.
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,rpm-master-stats".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The address on the RPM RAM from where the stats are read
+ should be provided as "phys_addr_base". The offset from
+ which the stats are available should be provided as
+ "offset_addr".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
+
+- qcom,masters:
+	Usage: required
+	Value type: <string list>
+	Definition: Provides the masters list.
+
+- qcom,master-offset:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Provides the offset of each master's stats region.
+
+EXAMPLE:
+
+qcom,rpm-master-stats@60150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x60150 0x5000>;
+ qcom,masters = "APSS", "MPSS", "PRONTO", "TZ", "LPASS";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+
diff --git a/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
new file mode 100644
index 0000000..36e1a69
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
@@ -0,0 +1,18 @@
+* RPMH Master Stats
+
+Different subsystems maintain master data in SMEM.
+It provides information on the individual masters at any given
+time like "system sleep counts", "system sleep last entered at"
+and "system sleep accumulated duration" etc. These stats can be
+shown to the user using the debugfs interface of the kernel.
+To achieve this, device tree node has been added.
+
+The required properties for rpmh-master-stats are:
+
+- compatible: "qcom,rpmh-master-stats".
+
+Example:
+
+qcom,rpmh-master-stats {
+ compatible = "qcom,rpmh-master-stats";
+};
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index 700a8f7..ad9d190 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -29,6 +29,7 @@
- qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
- qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory
+ - qcom,gpio-force-fatal-error: SMP2P bit triggered by WLAN FW to force error fatal.
Example:
@@ -59,4 +60,5 @@
qcom,smmu-s1-bypass;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
+	qcom,gpio-force-fatal-error = <&smp2pgpio_wlan_1_in 0 0>;
};
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 69174ca..2a7ac91 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -191,6 +191,9 @@
- qcom,gpu-quirk-hfi-use-reg:
Use registers to replace DCVS HFI message to avoid GMU failure
to access system memory during IFPC
+- qcom,gpu-quirk-limit-uche-gbif-rw:
+ Limit number of read and write transactions from UCHE block to
+ GBIF to avoid possible deadlock between GBIF, SMMU and MEMNOC.
KGSL Memory Pools:
- qcom,gpu-mempools: Container for sets of GPU mempools.Multiple sets
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
new file mode 100644
index 0000000..131942d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
@@ -0,0 +1,62 @@
+Synaptics DSXV27 touch controller
+
+The Synaptics Touch controller is connected to the
+host processor via I2C. The controller generates interrupts when the user touches
+the panel. The host controller is expected to read the touch coordinates over I2C and
+pass the coordinates to the rest of the system.
+
+Required properties:
+
+ - compatible : should be "synaptics,dsx-i2c".
+ - reg : i2c slave address of the device.
+ - interrupt-parent : parent of interrupt.
+ - synaptics,irq-gpio : irq gpio.
+ - synaptics,reset-gpio : reset gpio.
+ - vdd_supply : digital voltage power supply needed to power device.
+ - avdd_supply : analog voltage power supply needed to power device.
+ - synaptics,pwr-reg-name : power reg name of digital voltage.
+ - synaptics,bus-reg-name : bus reg name of analog voltage.
+
+Optional property:
+ - synaptics,ub-i2c-addr : addr of ub-i2c.
+ - synaptics,irq-on-state : status of irq gpio.
+ - synaptics,cap-button-codes : virtual key code mappings to be used.
+ - synaptics,vir-button-codes : virtual key code and the response region on panel.
+ - synaptics,x-flip : modify orientation of the x axis.
+ - synaptics,y-flip : modify orientation of the y axis.
+ - synaptics,reset-delay-ms : reset delay for controller (ms), default 100.
+ - synaptics,power-delay-ms : power delay for controller (ms), default 100.
+ - synaptics,reset-active-ms : reset active time for controller (ms), default 20.
+ - synaptics,max-y-for-2d : maximal y value of the panel.
+ - clock-names : Clock names used for secure touch. They are: "iface_clk", "core_clk"
+ - clocks : Defined if 'clock-names' DT property is defined. These clocks
+ are associated with the underlying I2C bus.
+
+Example:
+ i2c@78b7000 {
+ status = "ok";
+ synaptics@4b {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x4b>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 0x2008>;
+ vdd_supply = <&pmtitanium_l17>;
+ avdd_supply = <&pmtitanium_l6>;
+ synaptics,pwr-reg-name = "vdd";
+ synaptics,bus-reg-name = "avdd";
+ synaptics,ub-i2c-addr = <0x2c>;
+ synaptics,irq-gpio = <&tlmm 65 0x2008>;
+ synaptics,reset-gpio = <&tlmm 99 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,reset-active-ms = <20>;
+ synaptics,max-y-for-2d = <1919>; /* remove if no virtual buttons */
+ synaptics,cap-button-codes = <139 172 158>;
+ synaptics,vir-button-codes = <139 180 2000 320 160 172 540 2000 320 160 158 900 2000 320 160>;
+ /* Underlying clocks used by secure touch */
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 2a7e161..78aa1d7 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -74,6 +74,9 @@
address size faults are due to a fundamental programming
error from which we don't care about recovering anyways.
+- qcom,tz-device-id : A string indicating the device ID for this SMMU known
+ to TZ. See msm_tz_smmu.c for a full list of mappings.
+
- qcom,skip-init : Disable resetting configuration for all context banks
during device reset. This is useful for targets where
some context banks are dedicated to other execution
diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
index 8e56180..eff3d82 100644
--- a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
+++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
@@ -11,6 +11,16 @@
- interrupts: Interrupt number used by this controller
- io-macro-info: Internal io-macro-info
+Optional:
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps
+- qcom,bus-vector-names: specifies string IDs for the corresponding bus vectors
+ in the same order as qcom,msm-bus,vectors-KBps property.
+
Internal io-macro-info:
- io-macro-bypass-mode: <0 or 1> internal or external delay configuration
- io-interface: <rgmii/mii/rmii> PHY interface used
@@ -35,6 +45,14 @@
"tx-ch4-intr", "rx-ch0-intr",
"rx-ch1-intr", "rx-ch2-intr",
"rx-ch3-intr";
+ qcom,msm-bus,name = "emac";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */
+ <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */
+ <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+ qcom,bus-vector-names = "10", "100", "1000";
io-macro-info {
io-macro-bypass-mode = <0>;
io-interface = "rgmii";
diff --git a/Documentation/devicetree/bindings/nvmem/qcom-spmi-sdam.txt b/Documentation/devicetree/bindings/nvmem/qcom-spmi-sdam.txt
new file mode 100644
index 0000000..b849a22
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/qcom-spmi-sdam.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc. Shared Direct Access Memory (SDAM)
+
+The SDAM provides scratch register space for the PMIC clients.
+This memory can be used by software to store information or communicate
+to/from the PBUS.
+
+Below are the DT bindings for this module
+
+Supported properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,spmi-sdam"
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The base address and size of the sdam peripheral.
+
+- Data cells
+ Usage: required
+ Value type: Subnodes with bindings described in bindings/nvmem/nvmem.txt.
+ Definition: Cells defining the shared memory usage and configuration.
+
+Example:
+
+ sdam_1: sdam@b000 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0xb000 0x100>;
+
+ ....
+ /* Data cells */
+ restart_reason: restart@50 {
+ reg = <0x50 0x1>;
+ bits = <7 2>;
+ };
+ };
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+Example:
+
+ {
+ ...
+ nvmem-cells = <&restart_reason>;
+ nvmem-cell-names = "pmic_restart_reason";
+ };
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
index 4a69e03..f8329a9 100644
--- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -68,6 +68,7 @@
first segment to end address of last segment will be collected without
leaving any hole in between.
- qcom,ignore-ssr-failure: Boolean. If set, SSR failures are not considered fatal.
+- qcom,mas-crypto: Reference to the bus master of crypto core.
Example:
qcom,venus@fdce0000 {
diff --git a/Documentation/devicetree/bindings/power/supply/nx30p6093.txt b/Documentation/devicetree/bindings/power/supply/nx30p6093.txt
new file mode 100644
index 0000000..3d579bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/nx30p6093.txt
@@ -0,0 +1,72 @@
+Binding for NXP NX30P6093 moisture detection module
+
+NX30P6093 is an I2C controlled module and features an input impedance detection
+function, along with OVP protection up to 29V. The impedance detection can detect
+moisture or dust on USB lines and report the same to system to take necessary
+steps to avoid circuit damage to the Type-C port power supply pin.
+
+=======================
+Supported Properties
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: should be "nxp,nx30p6093".
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: The device 8-bit I2C address.
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Moisture detect interrupt specifier.
+
+- nxp,always-on-detect
+ Usage: optional
+ Value type: <boolean>
+	Definition: If specified, the NX30P6093 is configured to perform
+		moisture detection on every detection duty cycle configured
+		in "nxp,always-on-tduty-ms" property.
+
+- nxp,always-on-tduty-ms
+ Usage: required if "nxp,always-on-detect" specified
+ Value type: <u32>
+ Definition: The detection duty cycle (Tduty) in milliseconds.
+ Supported values are 10, 20, 50, 100, 200, 500, 1000,
+ 2000, 3000, 6000, 12000, 30000, 60000, 120000 and 300000.
+ If this property is not specified a default value of
+ 300000 milliseconds is used.
+
+- nxp,long-wakeup-sec
+ Usage: required if "nxp,always-on-detect" not specified.
+ Value type: <u32>
+ Definition: A longer time interval in seconds maintained between
+ moisture detection events after a previous moisture
+ detection event resulted in a good impedance detected
+ on USB lines. If this property is not specified a default
+ value of 28800 seconds (8 hrs) is used.
+
+- nxp,short-wakeup-ms
+ Usage: required if "nxp,always-on-detect" not specified.
+ Value type: <u32>
+ Definition: A shorter time interval in milliseconds maintained
+ between moisture detection events after a previous
+ moisture detection event resulted in a bad impedance
+ detected on USB lines. If this property is not specified
+ a default value of 180000 milliseconds (3 mins) is used.
+
+=======
+Example
+=======
+
+nx30p6093@0 {
+ compatible = "nxp,nx30p6093";
+ reg = <0x36>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <5 IRQ_TYPE_NONE>;
+ nxp,long-wakeup-sec = <28800>; /* 8 hours */
+ nxp,short-wakeup-ms = <180000>; /* 3 mins */
+};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 8654a3e..89c817e 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -1,110 +1,315 @@
MSM SoC HSUSB controllers
-EHCI
+OTG:
-Required properties:
-- compatible: Should contain "qcom,ehci-host"
-- regs: offset and length of the register set in the memory map
-- usb-phy: phandle for the PHY device
+Required properties :
+- compatible : should be "qcom,hsusb-otg"
+- regs : Array of offset and length of the register sets in the memory map
+- reg-names : indicates various iomem resources passed by name. The possible
+ strings in this field are:
+ "core": USB controller register space. (Required)
+ "tcsr": TCSR register for routing USB Controller signals to
+ either picoPHY0 or picoPHY1. (Optional)
+ "phy_csr": PHY Wrapper CSR register space. Provides register level
+ interface through AHB2PHY for performing PHY related operations
+ like retention and HV interrupts management.
+- interrupts: IRQ line
+- interrupt-names: OTG interrupt name(s) referenced in interrupts above
+ HSUSB OTG expects "core_irq" which is IRQ line from CORE and
+ "async_irq" from HSPHY for asynchronous wakeup events in LPM.
+ optional ones are described in next section.
+- qcom,hsusb-otg-phy-type: PHY type can be one of
+ 1 - Chipidea PHY (obsolete)
+	2 - Synopsys Pico PHY
+	3 - Synopsys Femto PHY
+ 4 - QUSB ULPI PHY
+- qcom,hsusb-otg-mode: Operational mode. Can be one of
+ 1 - Peripheral only mode
+ 2 - Host only mode
+ 3 - OTG mode
+ Based on the mode, OTG driver registers platform devices for
+ gadget and host.
+- qcom,hsusb-otg-otg-control: OTG control (VBUS and ID notifications)
+ can be one of
+ 1 - PHY control
+ 2 - PMIC control
+ 3 - User control (via debugfs)
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is "HSUSB_VDDCX" (when voting for VDDCX) or
+ "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
+ "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
+- qcom,vdd-voltage-level: This property must be a list of three integer
+ values (none, min, max) where each value represents either a voltage
+ in microvolts or a value corresponding to voltage corner. If usb core
+ supports svs, min value will have absolute SVS or SVS corner otherwise
+ min value will have absolute nominal or nominal corner.
+- clocks: a list of phandles to the USB clocks. Usage is as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+- clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+ property.
-Example EHCI controller device node:
+ Required clocks:
+ "core_clk": USB core clock that is required for data transfers.
+ "iface_clk": USB core clock that is required for register access.
- ehci: ehci@f9a55000 {
- compatible = "qcom,ehci-host";
- reg = <0xf9a55000 0x400>;
- usb-phy = <&usb_otg>;
+ Optional clocks:
+ "sleep_clk": PHY sleep clock. Required for interrupts.
+ "phy_reset_clk": PHY blocks asynchronous reset clock. Required
+ for the USB block reset. It is a reset only clock.
+ "phy_por_clk": Reset only clock for asserting/de-asserting
+ PHY POR signal. Required for overriding PHY parameters.
+ "phy_csr_clk": Required for accessing PHY CSR registers through
+ AHB2PHY interface.
+ "phy_ref_clk": Required when PHY have referance clock,
+ "xo": XO clock. The source clock that is used as a reference clock
+ to the PHY.
+ "bimc_clk", "snoc_clk", "pcnoc_clk": bus voting clocks. Used to
+ keep buses at a nominal frequency during USB peripheral
+ mode for achieving max throughput.
+- qcom,max-nominal-sysclk-rate: Indicates maximum nominal frequency for which
+ system clock should be voted whenever streaming mode is enabled.
+- resets: reset specifier pair consists of phandle for the reset provider
+ and reset lines used by this controller.
+- reset-names: reset signal name strings sorted in the same order as the resets
+ property.
+
+Optional properties :
+- interrupt-names : Optional interrupt resource entries are:
+ "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
+ "phy_irq" : Interrupt from PHY. Used for ID detection.
+- qcom,hsusb-otg-disable-reset: If present then core is RESET only during
+ init, otherwise core is RESET for every cable disconnect as well
+- qcom,hsusb-otg-pnoc-errata-fix: If present then workaround for PNOC
+ performance issue is applied which requires changing the mem-type
+ attribute via VMIDMT.
+- qcom,hsusb-otg-default-mode: The default USB mode after boot-up.
+ Applicable only when OTG is controlled by user. Can be one of
+ 0 - None. Low power mode
+ 1 - Peripheral
+ 2 - Host
+- qcom,hsusb-otg-phy-init-seq: PHY configuration sequence. val, reg pairs
+ terminate with -1
+- qcom,hsusb-otg-power-budget: VBUS power budget in mA
+ 0 will be treated as 500mA
+- qcom,hsusb-otg-pclk-src-name: The source of pclk
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
+ below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num_cases - There are three valid cases for this: NONE, MAX
+ and MIN bandwidth votes. Minimum two cases must be defined for
+ both NONE and MAX votes. If MIN vote is different from NONE VOTE
+ then specify third case for MIN VOTE. If explicit NOC clock rates
+ are not specified then MAX value should be large enough to get
+ desired BUS frequencies. In case explicit NOC clock rates are
+ specified, peripheral mode bus bandwidth vote should be defined
+ to vote for arbitrated bandwidth so that 60MHz frequency is met.
+
+ - qcom,msm-bus,num_paths
+ - qcom,msm-bus,vectors
+- qcom,hsusb-otg-lpm-on-dev-suspend: If present then USB enter to
+ low power mode upon receiving bus suspend.
+- qcom,hsusb-otg-clk-always-on-workaround: If present then USB core clocks
+ remain active upon receiving bus suspend and USB cable is connected.
+	Used for allowing USB to respond for remote wakeup.
+- qcom,hsusb-otg-delay-lpm: If present then USB core will wait one second
+ after disconnect before entering low power mode.
+- <supply-name>-supply: handle to the regulator device tree node.
+ Optional "supply-name" is "vbus_otg" to supply vbus in host mode.
+- qcom,dp-manual-pullup: If present, vbus is not routed to USB controller/phy
+ and controller driver therefore enables pull-up explicitly before
+ starting controller using usbcmd run/stop bit.
+- qcom,usb2-enable-hsphy2: If present then USB2 controller is connected to 2nd
+ HSPHY.
+- qcom,hsusb-log2-itc: value of 2^(log2_itc-1) will be used as the
+ interrupt threshold (ITC), when log2_itc is between 1 to 7.
+- qcom,hsusb-l1-supported: If present, the device supports l1 (Link power
+ management).
+- qcom,no-selective-suspend: If present selective suspend is disabled on hub ports.
+- qcom,hsusb-otg-mpm-dpsehv-int: If present, indicates mpm interrupt to be
+ configured for detection of dp line transition during VDD minimization.
+- qcom,hsusb-otg-mpm-dmsehv-int: If present, indicates mpm interrupt to be
+ configured for detection of dm line transition during VDD minimization.
+- pinctrl-names : This should be defined if a target uses gpio and pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
+ It should specify the names of the configs that pinctrl can install in driver
+ Following are the pinctrl config that can be installed
+ "hsusb_active" : Active configuration of pins, this should specify active
+ config of vddmin gpio (if used) defined in their pin groups.
+ "hsusb_sleep" : Disabled configuration of pins, this should specify sleep
+ config of vddmin gpio (if used) defined in their pin groups.
+- qcom,hsusb-otg-vddmin-gpio = If present, indicates a gpio that will be used
+ to supply voltage to the D+ line during VDD minimization and peripheral
+ bus suspend. If not exists, then VDD minimization will not be allowed
+ during peripheral bus suspend.
+- qcom,ahb-async-bridge-bypass: If present, indicates that enable AHB2AHB By Pass
+ mode with device controller for better throughput. With this mode, USB Core
+ runs using PNOC clock and synchronous to it. Hence it is must to have proper
+ "qcom,msm-bus,vectors" to have high bus frequency. User shouldn't try to
+ enable this feature without proper bus voting. When this feature is enabled,
+ it is required to do HW reset during cable disconnect for host mode functionality
+ working and hence need to disable qcom,hsusb-otg-disable-reset. With this feature
+ enabled, USB HW has to vote for maximum PNOC frequency as USB HW cannot tolerate
+ changes in PNOC frequency which results in USB functionality failure.
+- qcom,disable-retention-with-vdd-min: If present don't allow phy retention but allow
+ vdd min.
+- qcom,usbin-vadc: Corresponding vadc device's phandle to read usbin voltage using VADC.
+ This will be used to get value of usb power supply's VOLTAGE_NOW property.
+- qcom,usbid-gpio: This corresponds to gpio which is used for USB ID detection.
+- qcom,hub-reset-gpio: This corresponds to gpio which is used for HUB reset.
+- qcom,sw-sel-gpio: This corresponds to gpio which is used for switch select routing
+ of D+/D- between the USB HUB and type B USB jack for peripheral mode.
+- qcom,bus-clk-rate: If present, indicates nominal bus frequency to be voted for
+ bimc/snoc/pcnoc clock with usb cable connected. If AHB2AHB bypass is enabled,
+ pcnoc value should be defined to very large number so that PNOC runs at max
+ frequency. If 'qcom,default-mode-svs' is also set then two set of frequencies
+ must be specified for SVS and NOM modes which user can change using sysfs node.
+- qcom,phy-dvdd-always-on: If present PHY DVDD is supplied by a always-on
+ regulator unlike vddcx/vddmx. PHY can keep D+ pull-up and D+/D-
+ pull-down resistors during peripheral and host bus suspend without
+ any re-work.
+- qcom,emulation: Indicates that we are running on emulation platform.
+- qcom,boost-sysclk-with-streaming: If present, enable controller specific
+ streaming feature. Also this flag can bump up usb system clock to max in streaming
+ mode. This flag enables streaming mode for all compositions and is different from
+	streaming-func property defined in android device node. Please refer to Documentation/
+ devicetree/bindings/usb/android-dev.txt for details about "streaming-func" property.
+- qcom,axi-prefetch-enable: If present, AXI64 interface will be used for transferring data
+ to/from DDR by controller.
+- qcom,enable-sdp-typec-current-limit: Indicates whether type-c current for SDP CHARGER to
+ be limited.
+- qcom,enable-phy-id-pullup: If present, PHY can keep D+ pull-up resistor on USB ID line
+ during cable disconnect.
+- qcom,max-svs-sysclk-rate: Indicates system clock frequency voted by driver in
+ non-perf mode. In perf mode driver uses qcom,max-nominal-sysclk-rate.
+- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
+ which is used as a vote by driver to get max performance in perf mode.
+- qcom,default-mode-svs: Indicates USB system clock should run at SVS frequency.
+ User can bump it up using 'perf_mode' sysfs attribute for gadget.
+- qcom,vbus-low-as-hostmode: If present, specifies USB_VBUS to switch to host mode
+ if USB_VBUS is low or device mode if USB_VBUS is high.
+- qcom,usbeth-reset-gpio: If present then an external usb-to-eth is connected to
+ the USB host controller and its RESET_N signal is connected to this
+ usbeth-reset-gpio GPIO. It should be driven LOW to RESET the usb-to-eth.
+- extcon: phandles to external connector devices. First phandle should point to
+ external connector, which provide "USB" cable events, the second should
+ point to external connector device, which provide "USB-HOST" cable events.
+ A single phandle may be specified if a single connector device provides
+ both "USB" and "USB-HOST" events.
+
+Example HSUSB OTG controller device node :
+ usb@f9690000 {
+ compatible = "qcom,hsusb-otg";
+ reg = <0xf9690000 0x400>;
+ reg-names = "core";
+ interrupts = <134>;
+ interrupt-names = "core_irq";
+
+ qcom,hsusb-otg-phy-type = <2>;
+ qcom,hsusb-otg-mode = <1>;
+ qcom,hsusb-otg-otg-control = <1>;
+ qcom,hsusb-otg-disable-reset;
+ qcom,hsusb-otg-pnoc-errata-fix;
+ qcom,hsusb-otg-default-mode = <2>;
+ qcom,hsusb-otg-phy-init-seq = <0x01 0x90 0xffffffff>;
+ qcom,hsusb-otg-power-budget = <500>;
+ qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
+ qcom,hsusb-otg-lpm-on-dev-suspend;
+ qcom,hsusb-otg-clk-always-on-workaround;
+ hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
+ HSUSB_1p8-supply = <&pm8226_l10>;
+ HSUSB_3p3-supply = <&pm8226_l20>;
+ qcom,vdd-voltage-level = <1 5 7>;
+ qcom,dp-manual-pullup;
+ qcom,hsusb-otg-mpm-dpsehv-int = <49>;
+ qcom,hsusb-otg-mpm-dmsehv-int = <58>;
+ qcom,max-nominal-sysclk-rate = <133330000>;
+ qcom,max-svs-sysclk-rate = <100000000>;
+ qcom,pm-qos-latency = <59>;
+
+ qcom,msm-bus,name = "usb2";
+ qcom,msm-bus,num_cases = <2>;
+ qcom,msm-bus,num_paths = <1>;
+ qcom,msm-bus,vectors =
+ <87 512 0 0>,
+ <87 512 60000000 960000000>;
+ pinctrl-names = "hsusb_active","hsusb_sleep";
+ pinctrl-0 = <&vddmin_act>;
+ pinctrl-0 = <&vddmin_sus>;
+ qcom,hsusb-otg-vddmin-gpio = <&pm8019_mpps 6 0>;
+ qcom,disable-retention-with-vdd-min;
+ qcom,usbin-vadc = <&pm8226_vadc>;
+ qcom,usbid-gpio = <&msm_gpio 110 0>;
};
-USB PHY with optional OTG:
+MSM HSUSB EHCI controller
-Required properties:
-- compatible: Should contain:
- "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY
- "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY
+Required properties :
+- compatible : should be "qcom,ehci-host"
+- reg : offset and length of the register set in the memory map
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+ HSUSB EHCI expects "core_irq" and optionally "async_irq".
+- <supply-name>-supply: handle to the regulator device tree node
+ Required "supply-name" is either "hsusb_vdd_dig" or "HSUSB_VDDCX"
+ "HSUSB_1p8-supply" "HSUSB_3p3-supply".
+- qcom,usb2-power-budget: maximum vbus power (in mA) that can be provided.
+- qcom,vdd-voltage-level: This property must be a list of five integer
+	values (no, 0.5vsuspend, 0.75suspend, min, max) where each value represents
+ either a voltage in microvolts or a value corresponding to voltage corner.
+ First value represents value to vote when USB is not at all active, second
+ value represents value to vote when target is not connected to dock during low
+	power mode, third value represents value to vote when target is connected to dock
+ and no peripheral connected over dock during low power mode, fourth value represents
+ minimum value to vote when USB is operational, fifth item represents maximum value
+ to vote for USB is operational.
-- regs: Offset and length of the register set in the memory map
-- interrupts: interrupt-specifier for the OTG interrupt.
+Optional properties :
+- qcom,usb2-enable-hsphy2: If present, select second PHY for USB operation.
+- pinctrl-names : This should be defined if a target uses pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
+ It should specify the names of the configs that pinctrl can install in driver
+ Following are the pinctrl configs that can be installed
+ "ehci_active" : Active configuration of pins, this should specify active
+ config defined in pin groups of used gpio's from resume and
+ ext-hub-reset.
+ "ehci_sleep" : Disabled configuration of pins, this should specify sleep
+ config defined in pin groups of used gpio's from resume and
+ ext-hub-reset.
+- qcom,resume-gpio: if present then peripheral connected to usb controller
+ cannot wakeup from XO shutdown using in-band usb bus resume. Use resume
+ gpio to wakeup peripheral.
+- qcom,ext-hub-reset-gpio: If present then an external HUB is connected to
+ the USB host controller and its RESET_N signal is connected to this
+ ext-hub-reset-gpio GPIO. It should be driven LOW to RESET the HUB.
+- qcom,usb2-enable-uicc: If present, usb2 port will be used for uicc card connection.
+- usb-phy: phandle for the PHY device, if described as a separate device tree node
+- qcom,pm-qos-latency: This property represents the maximum tolerable CPU latency in
+ microsecs, which is used as a vote to keep the CPUs in a high enough power state when
+ USB bus is in use (not suspended).
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
+ below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num_cases - Two cases (NONE and MAX) for voting are supported.
+ - qcom,msm-bus,num_paths
+ - qcom,msm-bus,vectors
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "phy" USB PHY reference clock
- "core" Protocol engine clock
- "iface" Interface bus clock
- "alt_core" Protocol engine clock for targets with asynchronous
- reset methodology. (optional)
-
-- vdccx-supply: phandle to the regulator for the vdd supply for
- digital circuit operation.
-- v1p8-supply: phandle to the regulator for the 1.8V supply
-- v3p3-supply: phandle to the regulator for the 3.3V supply
-
-- resets: A list of phandle + reset-specifier pairs for the
- resets listed in reset-names
-- reset-names: Should contain the following:
- "phy" USB PHY controller reset
- "link" USB LINK controller reset
-
-- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of
- 1 - PHY control
- 2 - PMIC control
-
-Optional properties:
-- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg"
-
-- switch-gpio: A phandle + gpio-specifier pair. Some boards are using Dual
- SPDT USB Switch, witch is cotrolled by GPIO to de/multiplex
- D+/D- USB lines between connectors.
-
-- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
- Mode Eye Diagram test. Start address at which these values will be
- written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
- "do not overwrite default value at this address".
- For example: qcom,phy-init-sequence = < -1 0x63 >;
- Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1.
-
-- qcom,phy-num: Select number of pyco-phy to use, can be one of
- 0 - PHY one, default
- 1 - Second PHY
- Some platforms may have configuration to allow USB
- controller work with any of the two HSPHYs present.
-
-- qcom,vdd-levels: This property must be a list of three integer values
- (no, min, max) where each value represents either a voltage
- in microvolts or a value corresponding to voltage corner.
-
-- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy
- and controller driver therefore enables pull-up explicitly
- before starting controller using usbcmd run/stop bit.
-
-- extcon: phandles to external connector devices. First phandle
- should point to external connector, which provide "USB"
- cable events, the second should point to external connector
- device, which provide "USB-HOST" cable events. If one of
- the external connector devices is not required empty <0>
- phandle should be specified.
-
-Example HSUSB OTG controller device node:
-
- usb@f9a55000 {
- compatible = "qcom,usb-otg-snps";
- reg = <0xf9a55000 0x400>;
- interrupts = <0 134 0>;
- dr_mode = "peripheral";
-
- clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB_HS_AHB_CLK>;
-
- clock-names = "phy", "core", "iface";
-
- vddcx-supply = <&pm8841_s2_corner>;
- v1p8-supply = <&pm8941_l6>;
- v3p3-supply = <&pm8941_l24>;
-
- resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
-
- qcom,otg-control = <1>;
- qcom,phy-init-sequence = < -1 0x63 >;
- qcom,vdd-levels = <1 5 7>;
+Example MSM HSUSB EHCI controller device node :
+ ehci: qcom,ehci-host@f9a55000 {
+ compatible = "qcom,ehci-host";
+ reg = <0xf9a55000 0x400>;
+ interrupts = <0 134 0>, <0 140 0>;
+ interrupt-names = "core_irq", "async_irq";
+ /* If pinctrl is used and ext-hub-reset and resume gpio's are present*/
+ pinctrl-names = "ehci_active","ehci_sleep";
+ pinctrl-0 = <&ehci_reset_act &resume_act>;
+ pinctrl-1 = <&ehci_reset_sus &resume_sus>;
+ qcom,resume-gpio = <&msm_gpio 80 0>;
+ qcom,ext-hub-reset-gpio = <&msm_gpio 0 0>;
+ hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
+ HSUSB_1p8-supply = <&pm8941_l6>;
+ HSUSB_3p3-supply = <&pm8941_l24>;
+ qcom,usb2-enable-hsphy2;
+ qcom,usb2-power-budget = <500>;
+ qcom,vdd-voltage-level = <1 2 3 5 7>;
+ qcom,usb2-enable-uicc;
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
index f90bd7f..e51d54b 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
@@ -17,7 +17,6 @@
&snd_934x {
qcom,audio-routing =
- "AIF4 VI", "MCLK",
"RX_BIAS", "MCLK",
"MADINPUT", "MCLK",
"AMIC2", "MIC BIAS2",
@@ -28,8 +27,6 @@
"MIC BIAS2", "ANCLeft Headset Mic",
"AMIC5", "MIC BIAS3",
"MIC BIAS3", "Handset Mic",
- "DMIC0", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic0",
"DMIC1", "MIC BIAS1",
"MIC BIAS1", "Digital Mic1",
"DMIC2", "MIC BIAS3",
@@ -40,14 +37,13 @@
"MIC BIAS4", "Digital Mic4",
"DMIC5", "MIC BIAS4",
"MIC BIAS4", "Digital Mic5",
- "SpkrLeft IN", "SPK1 OUT",
"SpkrRight IN", "SPK2 OUT";
qcom,msm-mbhc-hphl-swh = <1>;
qcom,msm-mbhc-gnd-swh = <1>;
qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
- qcom,tavil-mclk-clk-freq = <12288000>;
+ qcom,tavil-mclk-clk-freq = <9600000>;
asoc-codec = <&stub_codec>;
asoc-codec-names = "msm-stub-codec.1";
@@ -66,23 +62,24 @@
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tlmm>;
- qcom,gpio-connect = <&tlmm 71 0>;
+ qcom,gpio-connect = <&tlmm 90 0>;
pinctrl-names = "default";
pinctrl-0 = <&wcd_intr_default>;
};
clock_audio_up: audio_ext_clk_up {
compatible = "qcom,audio-ref-clk";
- qcom,codec-mclk-clk-freq = <12288000>;
+ qcom,audio-ref-clk-gpio = <&tlmm 62 0>;
+ qcom,codec-mclk-clk-freq = <9600000>;
pinctrl-names = "sleep", "active";
pinctrl-0 = <&i2s_mclk_sleep>;
pinctrl-1 = <&i2s_mclk_active>;
#clock-cells = <1>;
};
- wcd_rst_gpio: msm_cdc_pinctrl@77 {
+ wcd_rst_gpio: msm_cdc_pinctrl@86 {
compatible = "qcom,msm-cdc-pinctrl";
- qcom,cdc-rst-n-gpio = <&tlmm 77 0>;
+ qcom,cdc-rst-n-gpio = <&tlmm 86 0>;
pinctrl-names = "aud_active", "aud_sleep";
pinctrl-0 = <&cdc_reset_active>;
pinctrl-1 = <&cdc_reset_sleep>;
@@ -91,8 +88,8 @@
&i2c_3 {
wcd934x_cdc: tavil_codec {
- compatible = "qcom,tavil-i2c-pgd";
- elemental-addr = [00 01 50 02 17 02];
+ compatible = "qcom,tavil-i2c";
+ reg = <0x0d>;
interrupt-parent = <&wcd9xxx_intc>;
interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
@@ -135,7 +132,7 @@
qcom,cdc-micbias3-mv = <1800>;
qcom,cdc-micbias4-mv = <1800>;
- qcom,cdc-mclk-clk-rate = <12288000>;
+ qcom,cdc-mclk-clk-rate = <9600000>;
qcom,cdc-dmic-sample-rate = <4800000>;
qcom,wdsp-cmpnt-dev-name = "tavil_codec";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
index 4fe2d1e..13e1fc3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
@@ -103,7 +103,6 @@
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_3_active>;
pinctrl-1 = <&i2c_3_sleep>;
- status = "disabled";
};
i2c_4: i2c@838000 { /* BLSP1 QUP4: GPIO: 76,77 */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 89945e3..94ccf9c 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -15,6 +15,7 @@
#include "sdxpoorwills.dtsi"
#include "sdxpoorwills-pinctrl.dtsi"
+#include "sdxpoorwills-cdp-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDXPOORWILLS CDP";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
index afc88969..a09b149 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
@@ -20,5 +20,11 @@
reg = <25>;
qcom,ion-heap-type = "SYSTEM";
};
+
+ qcom,ion-heap@28 { /* AUDIO HEAP */
+ reg = <28>;
+ memory-region = <&audio_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 9b8e751..82b65e2 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -947,12 +947,12 @@
wcd9xxx_intr {
wcd_intr_default: wcd_intr_default{
mux {
- pins = "gpio71";
+ pins = "gpio90";
function = "gpio";
};
config {
- pins = "gpio71";
+ pins = "gpio90";
drive-strength = <2>; /* 2 mA */
bias-pull-down; /* pull down */
input-enable;
@@ -963,11 +963,11 @@
cdc_reset_ctrl {
cdc_reset_sleep: cdc_reset_sleep {
mux {
- pins = "gpio77";
+ pins = "gpio86";
function = "gpio";
};
config {
- pins = "gpio77";
+ pins = "gpio86";
drive-strength = <2>;
bias-disable;
output-low;
@@ -976,11 +976,11 @@
cdc_reset_active:cdc_reset_active {
mux {
- pins = "gpio77";
+ pins = "gpio86";
function = "gpio";
};
config {
- pins = "gpio77";
+ pins = "gpio86";
drive-strength = <8>;
bias-pull-down;
output-high;
@@ -1063,7 +1063,7 @@
pri_ws_active_master: pri_ws_active_master {
mux {
pins = "gpio12";
- function = "pri_mi2s_ws_a";
+ function = "pri_mi2s";
};
config {
@@ -1077,7 +1077,7 @@
pri_sck_active_master: pri_sck_active_master {
mux {
pins = "gpio15";
- function = "pri_mi2s_sck_a";
+ function = "pri_mi2s";
};
config {
@@ -1091,7 +1091,7 @@
pri_ws_active_slave: pri_ws_active_slave {
mux {
pins = "gpio12";
- function = "pri_mi2s_ws_a";
+ function = "pri_mi2s";
};
config {
@@ -1104,7 +1104,7 @@
pri_sck_active_slave: pri_sck_active_slave {
mux {
pins = "gpio15";
- function = "pri_mi2s_sck_a";
+ function = "pri_mi2s";
};
config {
@@ -1117,7 +1117,7 @@
pri_dout_active: pri_dout_active {
mux {
pins = "gpio14";
- function = "pri_mi2s_data1_a";
+ function = "pri_mi2s";
};
config {
@@ -1147,7 +1147,7 @@
pri_din_active: pri_din_active {
mux {
pins = "gpio13";
- function = "pri_mi2s_data0_a";
+ function = "pri_mi2s";
};
config {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index f5351de..2706f21 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -698,6 +698,20 @@
"tx-ch3-intr", "tx-ch4-intr",
"rx-ch0-intr", "rx-ch1-intr",
"rx-ch2-intr", "rx-ch3-intr";
+ qcom,msm-bus,name = "emac";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */
+ <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */
+ <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+ qcom,bus-vector-names = "10", "100", "1000";
+ clocks = <&clock_gcc GCC_ETH_AXI_CLK>,
+ <&clock_gcc GCC_ETH_PTP_CLK>,
+ <&clock_gcc GCC_ETH_RGMII_CLK>,
+ <&clock_gcc GCC_ETH_SLAVE_AHB_CLK>;
+ clock-names = "eth_axi_clk", "eth_ptp_clk",
+ "eth_rgmii_clk", "eth_slave_ahb_clk";
io-macro-info {
io-macro-bypass-mode = <0>;
io-interface = "rgmii";
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index d3c8152..834dfb8 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -254,7 +254,6 @@
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_DEBUG=y
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index b1eca8f..d35cecb 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -121,6 +121,7 @@
select PM_OPP
select MFD_CORE
select SND_SOC_COMPRESS
+ select SND_HWDEP
help
This enables support for the ARMv8 based Qualcomm chipsets.
@@ -162,6 +163,15 @@
This enables support for the sdm450 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
+config ARCH_SDM632
+ bool "Enable Support for Qualcomm Technologies Inc. SDM632"
+ depends on ARCH_QCOM
+ select CPU_FREQ_QCOM
+ select COMMON_CLK_MSM
+ help
+ This enables support for the sdm632 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5a7e17e..eaa15ce 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -126,6 +126,7 @@
sda670-pm660a-mtp-overlay.dtbo \
qcs605-cdp-overlay.dtbo \
qcs605-mtp-overlay.dtbo \
+ qcs605-360camera-overlay.dtbo \
qcs605-external-codec-mtp-overlay.dtbo \
qcs605-lc-mtp-overlay.dtbo
@@ -156,6 +157,7 @@
qcs605-mtp-overlay.dtbo-base := qcs605.dtb
qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
qcs605-lc-mtp-overlay.dtbo-base := qcs605.dtb
+qcs605-360camera-overlay.dtbo-base := qcs605.dtb
else
dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index 5529ed1..32892a7 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,7 +181,7 @@
15 01 00 00 00 00 02 ec 00
15 01 00 00 00 00 02 ff 10
15 01 00 00 00 00 02 bb 10
- 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 35 00
05 01 00 00 78 00 02 11 00
05 01 00 00 78 00 02 29 00];
qcom,mdss-dsi-off-command = [05 01 00 00 14
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 1990b65..a57ea7c 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -16,6 +16,15 @@
qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,fastchg-current-ma = <3450>;
qcom,batt-id-kohm = <60>;
+ qcom,jeita-fcc-ranges = <0 100 1725000
+ 101 400 3450000
+ 401 450 2760000>;
+ qcom,jeita-fv-ranges = <0 100 4250000
+ 101 400 4350000
+ 401 450 4250000>;
+ qcom,step-chg-ranges = <3600000 4200000 3450000
+ 4201000 4300000 2760000
+ 4301000 4350000 2070000>;
qcom,battery-beta = <3435>;
qcom,battery-type = "ascent_3450mah_averaged_masterslave_oct30th2017";
qcom,checksum = <0xAAE2>;
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
new file mode 100644
index 0000000..e794472
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8953.h>
+
+&soc {
+ kgsl_smmu: arm,smmu-kgsl@1c40000 {
+ status = "ok";
+ compatible = "qcom,smmu-v2";
+ qcom,tz-device-id = "GPU";
+ reg = <0x1c40000 0x10000>;
+ #iommu-cells = <1>;
+ #global-interrupts = <0>;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,dynamic;
+ qcom,use-3-lvl-tables;
+ qcom,enable-static-cb;
+ qcom,enable-smmu-halt;
+ qcom,skip-init;
+ vdd-supply = <&gdsc_oxili_cx>;
+ qcom,regulator-names = "vdd";
+ clocks = <&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+ <&clock_gcc_gfx clk_gcc_bimc_gfx_clk>;
+ clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+ };
+
+ /* A test device to test the SMMU operation */
+ kgsl_iommu_test_device0 {
+ compatible = "iommu-debug-test";
+		/* The SID should be a valid one to get the proper
+		 * SMR, S2CR indices.
+		 */
+ iommus = <&kgsl_smmu 0x0>;
+ };
+
+ apps_iommu: qcom,iommu@1e00000 {
+ status = "disabled";
+ compatible = "qcom,qsmmu-v500";
+ reg = <0x1e00000 0x40000>,
+ <0x1ee2000 0x20>;
+ reg-names = "base", "tcu-base";
+ #iommu-cells = <2>;
+ qcom,tz-device-id = "APPS";
+ qcom,skip-init;
+ qcom,enable-static-cb;
+ qcom,use-3-lvl-tables;
+ qcom,disable-atos;
+ #global-interrupts = <0>;
+ #size-cells = <1>;
+ #address-cells = <1>;
+ ranges;
+ interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_gcc clk_gcc_smmu_cfg_clk>,
+ <&clock_gcc clk_gcc_apss_tcu_async_clk>;
+ clock-names = "iface_clk", "core_clk";
+ };
+};
+
+#include "msm-arm-smmu-impl-defs-8953.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
new file mode 100644
index 0000000..2122db9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&kgsl_smmu {
+ attach-impl-defs = <0x6000 0x270>,
+ <0x6060 0x1055>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6924 0x204>,
+ <0x6928 0x10800>,
+ <0x6930 0x400>,
+ <0x6960 0xffffffff>,
+ <0x6b64 0xa0000>,
+ <0x6b68 0xaaab92a>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
new file mode 100644
index 0000000..34004b0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ qcom,ion-heap@8 { /* CP_MM HEAP */
+ reg = <8>;
+ memory-region = <&secure_mem>;
+ qcom,ion-heap-type = "SECURE_DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index e3ada39..a45bb66 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -424,6 +424,32 @@
};
};
+ blsp2_uart0_active: blsp2_uart0_active {
+ mux {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ function = "blsp_uart5";
+ };
+
+ config {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart0_sleep: blsp2_uart0_sleep {
+ mux {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
/* SDC pin type */
sdc1_clk_on: sdc1_clk_on {
config {
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index a5245af..b4631e9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -119,6 +119,10 @@
smd36 = &smdtty_loopback;
sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
sdhc2 = &sdhc_2; /* SDC2 for SD card */
+ i2c2 = &i2c_2;
+ i2c3 = &i2c_3;
+ i2c5 = &i2c_5;
+ spi3 = &spi_3;
};
soc: soc { };
@@ -130,6 +134,8 @@
#include "msm8953-pm.dtsi"
#include "msm8953-bus.dtsi"
#include "msm8953-coresight.dtsi"
+#include "msm8953-ion.dtsi"
+#include "msm-arm-smmu-8953.dtsi"
&soc {
#address-cells = <1>;
@@ -493,6 +499,115 @@
#thermal-sensor-cells = <1>;
};
+ qcom_seecom: qseecom@85b00000 {
+ compatible = "qcom,qseecom";
+ reg = <0x85b00000 0x800000>;
+ reg-names = "secapp-region";
+ qcom,hlos-num-ce-hw-instances = <1>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,support-fde;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,support-bus-scaling;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 0 0>,
+ <55 512 120000 1200000>,
+ <55 512 393600 3936000>;
+ clocks = <&clock_gcc clk_crypto_clk_src>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ qcom,ce-opp-freq = <100000000>;
+ status = "okay";
+ };
+
+ qcom_tzlog: tz-log@08600720 {
+ compatible = "qcom,tz-log";
+ reg = <0x08600720 0x2000>;
+ status = "okay";
+ };
+
+ qcom_rng: qrng@e3000 {
+ compatible = "qcom,msm-rng";
+ reg = <0xe3000 0x1000>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ qcom,msm-bus,name = "msm-rng-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 618 0 0>, /* No vote */
+ <1 618 0 800>; /* 100 MB/s */
+ clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
+ clock-names = "iface_clk";
+ status = "okay";
+ };
+
+ qcom_crypto: qcrypto@720000 {
+ compatible = "qcom,qcrypto";
+ reg = <0x720000 0x20000>,
+ <0x704000 0x20000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <2>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,clk-mgmt-sus-res;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 393600 393600>;
+ clocks = <&clock_gcc clk_crypto_clk_src>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ qcom,use-sw-aes-cbc-ecb-ctr-algo;
+ qcom,use-sw-aes-xts-algo;
+ qcom,use-sw-aes-ccm-algo;
+ qcom,use-sw-ahash-algo;
+ qcom,use-sw-hmac-algo;
+ qcom,use-sw-aead-algo;
+ qcom,ce-opp-freq = <100000000>;
+ status = "okay";
+ };
+
+ qcom_cedev: qcedev@720000 {
+ compatible = "qcom,qcedev";
+ reg = <0x720000 0x20000>,
+ <0x704000 0x20000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <55 512 0 0>,
+ <55 512 393600 393600>;
+ clocks = <&clock_gcc clk_crypto_clk_src>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ qcom,ce-opp-freq = <100000000>;
+ status = "okay";
+ };
+
blsp1_uart0: serial@78af000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x78af000 0x200>;
@@ -503,6 +618,78 @@
status = "disabled";
};
+ blsp1_uart1: uart@78b0000 {
+ compatible = "qcom,msm-hsuart-v14";
+ reg = <0x78b0000 0x200>,
+ <0x7884000 0x1f000>;
+ reg-names = "core_mem", "bam_mem";
+
+ interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+ #address-cells = <0>;
+ interrupt-parent = <&blsp1_uart1>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 108 0
+ 1 &intc 0 238 0
+ 2 &tlmm 13 0>;
+
+ qcom,inject-rx-on-wakeup;
+ qcom,rx-char-to-inject = <0xFD>;
+ qcom,master-id = <86>;
+ clock-names = "core_clk", "iface_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+ <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ pinctrl-names = "sleep", "default";
+ pinctrl-0 = <&hsuart_sleep>;
+ pinctrl-1 = <&hsuart_active>;
+ qcom,bam-tx-ep-pipe-index = <2>;
+ qcom,bam-rx-ep-pipe-index = <3>;
+ qcom,msm-bus,name = "blsp1_uart1";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <86 512 0 0>,
+ <86 512 500 800>;
+ status = "disabled";
+ };
+
+ blsp2_uart0: uart@7aef000 {
+ compatible = "qcom,msm-hsuart-v14";
+ reg = <0x7aef000 0x200>,
+ <0x7ac4000 0x1f000>;
+ reg-names = "core_mem", "bam_mem";
+
+ interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+ #address-cells = <0>;
+ interrupt-parent = <&blsp2_uart0>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 306 0
+ 1 &intc 0 239 0
+ 2 &tlmm 17 0>;
+
+ qcom,inject-rx-on-wakeup;
+ qcom,rx-char-to-inject = <0xFD>;
+ qcom,master-id = <84>;
+ clock-names = "core_clk", "iface_clk";
+ clocks = <&clock_gcc clk_gcc_blsp2_uart1_apps_clk>,
+ <&clock_gcc clk_gcc_blsp2_ahb_clk>;
+ pinctrl-names = "sleep", "default";
+ pinctrl-0 = <&blsp2_uart0_sleep>;
+ pinctrl-1 = <&blsp2_uart0_active>;
+ qcom,bam-tx-ep-pipe-index = <0>;
+ qcom,bam-rx-ep-pipe-index = <1>;
+ qcom,msm-bus,name = "blsp2_uart0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <84 512 0 0>,
+ <84 512 500 800>;
+ status = "disabled";
+ };
+
dma_blsp1: qcom,sps-dma@7884000 { /* BLSP1 */
#dma-cells = <4>;
compatible = "qcom,sps-dma";
@@ -519,6 +706,110 @@
qcom,summing-threshold = <10>;
};
+ spi_3: spi@78b7000 { /* BLSP1 QUP3 */
+ compatible = "qcom,spi-qup-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "spi_physical", "spi_bam_physical";
+ reg = <0x78b7000 0x600>,
+ <0x7884000 0x1f000>;
+ interrupt-names = "spi_irq", "spi_bam_irq";
+ interrupts = <0 97 0>, <0 238 0>;
+ spi-max-frequency = <19200000>;
+ pinctrl-names = "spi_default", "spi_sleep";
+ pinctrl-0 = <&spi3_default &spi3_cs0_active>;
+ pinctrl-1 = <&spi3_sleep &spi3_cs0_sleep>;
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_spi_apps_clk>;
+ clock-names = "iface_clk", "core_clk";
+ qcom,infinite-mode = <0>;
+ qcom,use-bam;
+ qcom,use-pinctrl;
+ qcom,ver-reg-exists;
+ qcom,bam-consumer-pipe-index = <8>;
+ qcom,bam-producer-pipe-index = <9>;
+ qcom,master-id = <86>;
+ status = "disabled";
+ };
+
+ i2c_2: i2c@78b6000 { /* BLSP1 QUP2 */
+ compatible = "qcom,i2c-msm-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ reg = <0x78b6000 0x600>;
+ interrupt-names = "qup_irq";
+ interrupts = <0 96 0>;
+ qcom,clk-freq-out = <400000>;
+ qcom,clk-freq-in = <19200000>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup2_i2c_apps_clk>;
+
+ pinctrl-names = "i2c_active", "i2c_sleep";
+ pinctrl-0 = <&i2c_2_active>;
+ pinctrl-1 = <&i2c_2_sleep>;
+ qcom,noise-rjct-scl = <0>;
+ qcom,noise-rjct-sda = <0>;
+ qcom,master-id = <86>;
+ dmas = <&dma_blsp1 6 64 0x20000020 0x20>,
+ <&dma_blsp1 7 32 0x20000020 0x20>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+ compatible = "qcom,i2c-msm-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ reg = <0x78b7000 0x600>;
+ interrupt-names = "qup_irq";
+ interrupts = <0 97 0>;
+ qcom,clk-freq-out = <400000>;
+ qcom,clk-freq-in = <19200000>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+
+ pinctrl-names = "i2c_active", "i2c_sleep";
+ pinctrl-0 = <&i2c_3_active>;
+ pinctrl-1 = <&i2c_3_sleep>;
+ qcom,noise-rjct-scl = <0>;
+ qcom,noise-rjct-sda = <0>;
+ qcom,master-id = <86>;
+ dmas = <&dma_blsp1 8 64 0x20000020 0x20>,
+ <&dma_blsp1 9 32 0x20000020 0x20>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c_5: i2c@7af5000 { /* BLSP2 QUP1 */
+ compatible = "qcom,i2c-msm-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ reg = <0x7af5000 0x600>;
+ interrupt-names = "qup_irq";
+ interrupts = <0 299 0>;
+ qcom,clk-freq-out = <400000>;
+ qcom,clk-freq-in = <19200000>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp2_qup1_i2c_apps_clk>;
+
+ pinctrl-names = "i2c_active", "i2c_sleep";
+ pinctrl-0 = <&i2c_5_active>;
+ pinctrl-1 = <&i2c_5_sleep>;
+ qcom,noise-rjct-scl = <0>;
+ qcom,noise-rjct-sda = <0>;
+ qcom,master-id = <84>;
+ dmas = <&dma_blsp2 4 64 0x20000020 0x20>,
+ <&dma_blsp2 5 32 0x20000020 0x20>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
slim_msm: slim@c140000{
cell-index = <1>;
compatible = "qcom,slim-ngd";
@@ -1160,6 +1451,39 @@
status = "disabled";
};
+ ipa_hw: qcom,ipa@07900000 {
+ compatible = "qcom,ipa";
+ reg = <0x07900000 0x4effc>, <0x07904000 0x26934>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 228 0>,
+ <0 230 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+ qcom,ipa-hw-ver = <6>; /* IPA core version = IPAv2.6L */
+ qcom,ipa-hw-mode = <0>; /* IPA hw type = Normal */
+		qcom,wan-rx-ring-size = <192>; /* IPA WAN-rx-ring-size */
+		qcom,lan-rx-ring-size = <192>; /* IPA LAN-rx-ring-size */
+ clock-names = "core_clk";
+ clocks = <&clock_gcc clk_ipa_clk>;
+ qcom,ee = <0>;
+ qcom,use-ipa-tethering-bridge;
+ qcom,modem-cfg-emb-pipe-flt;
+ qcom,msm-bus,name = "ipa";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <90 512 0 0>, /* No BIMC vote (ab=0 Mbps, ib=0 Mbps ~ 0MHZ) */
+ <90 512 100000 800000>, /* SVS (ab=100, ib=800 ~ 50MHz) */
+ <90 512 100000 1200000>; /* PERF (ab=100, ib=1200 ~ 75MHz) */
+ qcom,bus-vector-names = "MIN", "SVS", "PERF";
+ };
+
+ qcom,rmnet-ipa {
+ compatible = "qcom,rmnet-ipa";
+ qcom,rmnet-ipa-ssr;
+ qcom,ipa-loaduC;
+ qcom,ipa-advertise-sg-support;
+ };
+
spmi_bus: qcom,spmi@200f000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x200f000 0x1000>,
@@ -1463,6 +1787,107 @@
reg = <0x070f8000 0x300>;
qcom,reset-ep-after-lpm-resume;
};
+
+ qcom,lpass@c200000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xc200000 0x00100>;
+ interrupts = <0 293 1>;
+
+ vdd_cx-supply = <&pm8953_s2_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+ qcom,mas-crypto = <&mas_crypto>;
+
+ clocks = <&clock_gcc clk_xo_pil_lpass_clk>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>,
+ <&clock_gcc clk_crypto_clk_src>;
+ clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,proxy-clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,scm_core_clk_src-freq = <80000000>;
+
+ qcom,pas-id = <1>;
+ qcom,complete-ramdump;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+
+ memory-region = <&adsp_fw_mem>;
+ };
+
+ qcom,pronto@a21b000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x0a21b000 0x3000>;
+ interrupts = <0 149 1>;
+
+ vdd_pronto_pll-supply = <&pm8953_l7>;
+ proxy-reg-names = "vdd_pronto_pll";
+ vdd_pronto_pll-uV-uA = <1800000 18000>;
+ qcom,mas-crypto = <&mas_crypto>;
+
+ clocks = <&clock_gcc clk_xo_pil_pronto_clk>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>,
+ <&clock_gcc clk_crypto_clk_src>;
+
+ clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,proxy-clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+ qcom,scm_core_clk_src = <80000000>;
+
+ qcom,pas-id = <6>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <422>;
+ qcom,sysmon-id = <6>;
+ qcom,ssctl-instance-id = <0x13>;
+ qcom,firmware-name = "wcnss";
+
+ memory-region = <&wcnss_fw_mem>;
+ };
+
+ qcom,venus@1de0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x1de0000 0x4000>;
+
+ vdd-supply = <&gdsc_venus>;
+ qcom,proxy-reg-names = "vdd";
+
+ clocks = <&clock_gcc clk_gcc_venus0_vcodec0_clk>,
+ <&clock_gcc clk_gcc_venus0_ahb_clk>,
+ <&clock_gcc clk_gcc_venus0_axi_clk>,
+ <&clock_gcc clk_gcc_crypto_clk>,
+ <&clock_gcc clk_gcc_crypto_ahb_clk>,
+ <&clock_gcc clk_gcc_crypto_axi_clk>,
+ <&clock_gcc clk_crypto_clk_src>;
+
+ clock-names = "core_clk", "iface_clk", "bus_clk",
+ "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_core_clk_src";
+
+ qcom,proxy-clock-names = "core_clk", "iface_clk",
+ "bus_clk", "scm_core_clk",
+ "scm_iface_clk", "scm_bus_clk",
+ "scm_core_clk_src";
+ qcom,scm_core_clk_src-freq = <80000000>;
+
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+ qcom,pas-id = <9>;
+ qcom,proxy-timeout-ms = <100>;
+ qcom,firmware-name = "venus";
+ memory-region = <&venus_mem>;
+ };
};
#include "pm8953-rpm-regulator.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 0ddb9f5..6d85d7b 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,105 +65,34 @@
};
pm8953_mpps: mpps {
- compatible = "qcom,qpnp-pin";
- spmi-dev-container;
+ compatible = "qcom,spmi-mpp";
+ reg = <0xa000 0x400>;
+
+ interrupts = <0x0 0xa0 0 IRQ_TYPE_NONE>,
+ <0x0 0xa1 0 IRQ_TYPE_NONE>,
+ <0x0 0xa2 0 IRQ_TYPE_NONE>,
+ <0x0 0xa3 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pm8953_mpp1", "pm8953_mpp2",
+ "pm8953_mpp3", "pm8953_mpp4";
+
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pm8953-mpp";
-
- mpp@a000 {
- reg = <0xa000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- mpp@a100 {
- reg = <0xa100 0x100>;
- qcom,pin-num = <2>;
- /* MPP2 - PA_THERM config */
- qcom,mode = <4>; /* AIN input */
- qcom,invert = <1>; /* Enable MPP */
- qcom,ain-route = <1>; /* AMUX 6 */
- qcom,master-en = <1>;
- qcom,src-sel = <0>; /* Function constant */
- };
-
- mpp@a200 {
- reg = <0xa200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- mpp@a300 {
- reg = <0xa300 0x100>;
- qcom,pin-num = <4>;
- /* MPP4 - CASE_THERM config */
- qcom,mode = <4>; /* AIN input */
- qcom,invert = <1>; /* Enable MPP */
- qcom,ain-route = <3>; /* AMUX 8 */
- qcom,master-en = <1>;
- qcom,src-sel = <0>; /* Function constant */
- };
};
pm8953_gpios: gpios {
- spmi-dev-container;
- compatible = "qcom,qpnp-pin";
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0x800>;
+
+ interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+ <0x0 0xc3 0 IRQ_TYPE_NONE>,
+ <0x0 0xc6 0 IRQ_TYPE_NONE>,
+ <0x0 0xc7 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pm8953_gpio1", "pm8953_gpio4",
+ "pm8953_gpio7", "pm8953_gpio8";
+
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pm8953-gpio";
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
-
- gpio@c200 {
- reg = <0xc200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- gpio@c300 {
- reg = <0xc300 0x100>;
- qcom,pin-num = <4>;
- status = "disabled";
- };
-
- gpio@c400 {
- reg = <0xc400 0x100>;
- qcom,pin-num = <5>;
- status = "disabled";
- };
-
- gpio@c500 {
- reg = <0xc500 0x100>;
- qcom,pin-num = <6>;
- status = "disabled";
- };
-
- gpio@c600 {
- reg = <0xc600 0x100>;
- qcom,pin-num = <7>;
- status = "disabled";
- };
-
- gpio@c700 {
- reg = <0xc700 0x100>;
- qcom,pin-num = <8>;
- status = "disabled";
- };
+ qcom,gpios-disallowed = <2 3 5 6>;
};
pm8953_vadc: vadc@3100 {
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
new file mode 100644
index 0000000..b0fb23c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -0,0 +1,119 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+ qcom,pmi632@2 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmi632_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ pmi632_pon: qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ qcom,secondary-pon-reset;
+ };
+
+ pmi632_tz: qcom,temp-alarm@2400 {
+ compatible = "qcom,qpnp-temp-alarm";
+ reg = <0x2400 0x100>;
+ interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+ label = "pmi632_tz";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmi632_gpios: pinctrl@c000 {
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0x800>;
+ interrupts = <0x2 0xc1 0 IRQ_TYPE_NONE>,
+ <0x2 0xc2 0 IRQ_TYPE_NONE>,
+ <0x2 0xc3 0 IRQ_TYPE_NONE>,
+ <0x2 0xc4 0 IRQ_TYPE_NONE>,
+ <0x2 0xc5 0 IRQ_TYPE_NONE>,
+ <0x2 0xc6 0 IRQ_TYPE_NONE>,
+				<0x2 0xc7 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pmi632_gpio2", "pmi632_gpio3",
+ "pmi632_gpio4", "pmi632_gpio5",
+ "pmi632_gpio6", "pmi632_gpio7",
+ "pmi632_gpio8";
+ gpio-controller;
+ #gpio-cells = <2>;
+ qcom,gpios-disallowed = <1>;
+ };
+ };
+
+ pmi632_3: qcom,pmi632@3 {
+			compatible = "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ pmi632_pwm_1: pwm@b300 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb300 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <1>;
+ qcom,supported-sizes = <6>, <9>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pmi632_pwm_2: pwm@b400 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb400 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <2>;
+ qcom,supported-sizes = <6>, <9>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pmi632_pwm_3: pwm@b500 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb500 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <3>;
+ qcom,supported-sizes = <6>, <9>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pmi632_pwm_4: pwm@b600 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb600 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <4>;
+ qcom,supported-sizes = <6>, <9>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pmi632_pwm_5: pwm@b700 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb700 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <5>;
+ qcom,supported-sizes = <6>, <9>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index e731f5b..97be32de 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -140,57 +140,30 @@
};
pmi8950_gpios: gpios {
- compatible = "qcom,qpnp-pin";
+ compatible = "qcom,spmi-gpio";
+ reg = <0xc000 0x200>;
+
+ interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+ <0x2 0xc1 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pmi8950_gpio1", "pmi8950_gpio2";
+
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pmi8950-gpio";
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
};
pmi8950_mpps: mpps {
- compatible = "qcom,qpnp-pin";
+ compatible = "qcom,spmi-mpp";
+ reg = <0xa000 0x400>;
+
+ interrupts = <0x2 0xa0 0 IRQ_TYPE_NONE>,
+ <0x2 0xa1 0 IRQ_TYPE_NONE>,
+ <0x2 0xa2 0 IRQ_TYPE_NONE>,
+ <0x2 0xa3 0 IRQ_TYPE_NONE>;
+ interrupt-names = "pmi8950_mpp1", "pmi8950_mpp2",
+ "pmi8950_mpp3", "pmi8950_mpp4";
+
gpio-controller;
#gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- label = "pmi8950-mpp";
-
- mpp@a000 {
- reg = <0xa000 0x100>;
- qcom,pin-num = <1>;
- status = "disabled";
- };
-
- mpp@a100 {
- reg = <0xa100 0x100>;
- qcom,pin-num = <2>;
- status = "disabled";
- };
-
- mpp@a200 {
- reg = <0xa200 0x100>;
- qcom,pin-num = <3>;
- status = "disabled";
- };
-
- mpp@a300 {
- reg = <0xa300 0x100>;
- qcom,pin-num = <4>;
- status = "disabled";
- };
};
pmi8950_charger: qcom,qpnp-smbcharger {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
new file mode 100644
index 0000000..e7a2197
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "qcs605-360camera.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. QCS605 PM660+PM660L 360camera";
+ compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+ qcom,msm-id = <347 0x0>;
+ qcom,board-id = <0x0000000b 1>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0102001a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
index 26a73b0..378c4a1 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
@@ -38,3 +38,56 @@
&usb_qmp_phy {
status = "ok";
};
+
+&tlmm {
+ pmx_ts_rst_active {
+ ts_rst_active: ts_rst_active {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_rst_suspend {
+ ts_rst_suspend: ts_rst_suspend {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+};
+
+&soc {
+ hbtp {
+ compatible = "qcom,hbtp-input";
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_rst_active>;
+ pinctrl-1 = <&ts_rst_suspend>;
+ vcc_ana-supply = <&pm8998_l28>;
+ vcc_dig-supply = <&pm8998_l14>;
+ qcom,afe-load = <20000>;
+ qcom,afe-vtg-min = <3000000>;
+ qcom,afe-vtg-max = <3000000>;
+ qcom,dig-load = <40000>;
+ qcom,dig-vtg-min = <1800000>;
+ qcom,dig-vtg-max = <1800000>;
+ qcom,fb-resume-delay-us = <1000>;
+ qcom,afe-force-power-on;
+ qcom,afe-power-on-delay-us = <6>;
+ qcom,afe-power-off-delay-us = <6>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index 5dd5c0d..6510fa2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -121,6 +121,13 @@
pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
};
+ wcd_gnd_mic_swap_gpio: msm_cdc_pinctrl_gnd_mic_swap {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_gnd_mic_swap_active>;
+ pinctrl-1 = <&wcd_gnd_mic_swap_idle>;
+ };
+
cdc_pdm_gpios: cdc_pdm_pinctrl {
compatible = "qcom,msm-cdc-pinctrl";
pinctrl-names = "aud_active", "aud_sleep";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
index 18b0cd8..c40fff6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
@@ -11,247 +11,245 @@
* GNU General Public License for more details.
*/
-&soc {
- led_flash_rear: qcom,camera-flash@0 {
- cell-index = <0>;
- reg = <0x00 0x00>;
- compatible = "qcom,camera-flash";
- flash-source = <&pm660l_flash0 &pm660l_flash1>;
- torch-source = <&pm660l_torch0 &pm660l_torch1>;
- switch-source = <&pm660l_switch0>;
- status = "ok";
- };
+&led_flash_rear {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+};
- led_flash_front: qcom,camera-flash@1 {
- cell-index = <1>;
- reg = <0x01 0x00>;
- compatible = "qcom,camera-flash";
- flash-source = <&pm660l_flash2>;
- torch-source = <&pm660l_torch2>;
- switch-source = <&pm660l_switch1>;
- status = "ok";
- };
+&led_flash_front {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash2>;
+ torch-source = <&pm660l_torch2>;
+ switch-source = <&pm660l_switch1>;
+ status = "ok";
+};
- actuator_regulator: gpio-regulator@0 {
- compatible = "regulator-fixed";
- reg = <0x00 0x00>;
- regulator-name = "actuator_regulator";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-enable-ramp-delay = <100>;
- enable-active-high;
- gpio = <&tlmm 27 0>;
- };
+&actuator_regulator {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+};
- camera_ldo: gpio-regulator@2 {
- compatible = "regulator-fixed";
- reg = <0x02 0x00>;
- regulator-name = "camera_ldo";
- regulator-min-microvolt = <1352000>;
- regulator-max-microvolt = <1352000>;
- regulator-enable-ramp-delay = <233>;
- enable-active-high;
- gpio = <&pm660l_gpios 4 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&camera_dvdd_en_default>;
- vin-supply = <&pm660_s6>;
- };
+&camera_ldo {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+};
- camera_rear_ldo: gpio-regulator@1 {
- compatible = "regulator-fixed";
- reg = <0x01 0x00>;
- regulator-name = "camera_rear_ldo";
- regulator-min-microvolt = <1352000>;
- regulator-max-microvolt = <1352000>;
- regulator-enable-ramp-delay = <135>;
- enable-active-high;
- gpio = <&pm660l_gpios 4 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&camera_rear_dvdd_en_default>;
- vin-supply = <&pm660_s6>;
- };
+&camera_rear_ldo {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+};
- camera_vio_ldo: gpio-regulator@3 {
- compatible = "regulator-fixed";
- reg = <0x03 0x00>;
- regulator-name = "camera_vio_ldo";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-enable-ramp-delay = <233>;
- enable-active-high;
- gpio = <&tlmm 29 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&cam_sensor_rear_vio>;
- vin-supply = <&pm660_s4>;
- };
+&camera_vio_ldo {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "camera_vio_ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vio>;
+ vin-supply = <&pm660_s4>;
+};
- camera_vana_ldo: gpio-regulator@4 {
- compatible = "regulator-fixed";
- reg = <0x04 0x00>;
- regulator-name = "camera_vana_ldo";
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <2850000>;
- regulator-enable-ramp-delay = <233>;
- enable-active-high;
- gpio = <&tlmm 8 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&cam_sensor_rear_vana>;
- vin-supply = <&pm660l_bob>;
- };
+&camera_vana_ldo {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pm660l_bob>;
+};
+
+&actuator_rear {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+};
+
+&actuator_front {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+};
+
+&ois_rear {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+};
+
+&eeprom_rear {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+};
+
+&eeprom_rear_aux {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+};
+
+&eeprom_front {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
&cam_cci {
- actuator_rear: qcom,actuator@0 {
- cell-index = <0>;
- reg = <0x0>;
- compatible = "qcom,actuator";
- cci-master = <0>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <2800000>;
- rgltr-max-voltage = <2800000>;
- rgltr-load-current = <0>;
- };
-
- actuator_front: qcom,actuator@1 {
- cell-index = <1>;
- reg = <0x1>;
- compatible = "qcom,actuator";
- cci-master = <1>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <2800000>;
- rgltr-max-voltage = <2800000>;
- rgltr-load-current = <0>;
- };
-
- ois_rear: qcom,ois@0 {
- cell-index = <0>;
- reg = <0x0>;
- compatible = "qcom,ois";
- cci-master = <0>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <2800000>;
- rgltr-max-voltage = <2800000>;
- rgltr-load-current = <0>;
- status = "disabled";
- };
-
- eeprom_rear: qcom,eeprom@0 {
- cell-index = <0>;
- reg = <0>;
- compatible = "qcom,eeprom";
- cam_vio-supply = <&camera_vio_ldo>;
- cam_vana-supply = <&camera_vana_ldo>;
- cam_vdig-supply = <&camera_rear_ldo>;
- cam_clk-supply = <&titan_top_gdsc>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vio", "cam_vana", "cam_vdig",
- "cam_clk", "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-load-current = <0 80000 105000 0 0>;
- gpio-no-mux = <0>;
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk0_active
- &cam_sensor_rear_active>;
- pinctrl-1 = <&cam_sensor_mclk0_suspend
- &cam_sensor_rear_suspend>;
- gpios = <&tlmm 13 0>,
- <&tlmm 30 0>;
- gpio-reset = <1>;
- gpio-req-tbl-num = <0 1>;
- gpio-req-tbl-flags = <1 0>;
- gpio-req-tbl-label = "CAMIF_MCLK0",
- "CAM_RESET0";
- sensor-mode = <0>;
- cci-master = <0>;
- status = "ok";
- clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
- clock-names = "cam_clk";
- clock-cntl-level = "turbo";
- clock-rates = <24000000>;
- };
-
- eeprom_rear_aux: qcom,eeprom@1 {
- cell-index = <1>;
- reg = <0x1>;
- compatible = "qcom,eeprom";
- cam_vio-supply = <&camera_vio_ldo>;
- cam_vana-supply = <&camera_vana_ldo>;
- cam_vdig-supply = <&camera_ldo>;
- cam_clk-supply = <&titan_top_gdsc>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vdig", "cam_vio", "cam_vana",
- "cam_clk", "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
- rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
- rgltr-load-current = <105000 0 80000 0>;
- gpio-no-mux = <0>;
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk1_active
- &cam_sensor_rear2_active>;
- pinctrl-1 = <&cam_sensor_mclk1_suspend
- &cam_sensor_rear2_suspend>;
- gpios = <&tlmm 14 0>,
- <&tlmm 28 0>;
- gpio-reset = <1>;
- gpio-req-tbl-num = <0 1>;
- gpio-req-tbl-flags = <1 0>;
- gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1";
- sensor-position = <0>;
- sensor-mode = <0>;
- cci-master = <1>;
- status = "ok";
- clock-names = "cam_clk";
- clock-cntl-level = "turbo";
- clock-rates = <24000000>;
- };
-
- eeprom_front: qcom,eeprom@2 {
- cell-index = <2>;
- reg = <0x2>;
- compatible = "qcom,eeprom";
- cam_vio-supply = <&camera_vio_ldo>;
- cam_vana-supply = <&camera_vana_ldo>;
- cam_vdig-supply = <&camera_ldo>;
- cam_clk-supply = <&titan_top_gdsc>;
- cam_vaf-supply = <&actuator_regulator>;
- regulator-names = "cam_vio", "cam_vana", "cam_vdig",
- "cam_clk", "cam_vaf";
- rgltr-cntrl-support;
- rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-load-current = <0 80000 105000 0>;
- gpio-no-mux = <0>;
- pinctrl-names = "cam_default", "cam_suspend";
- pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_front_active>;
- pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_front_suspend>;
- gpios = <&tlmm 15 0>,
- <&tlmm 9 0>;
- gpio-reset = <1>;
- gpio-req-tbl-num = <0 1>;
- gpio-req-tbl-flags = <1 0>;
- gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET2";
- sensor-mode = <0>;
- cci-master = <1>;
- status = "ok";
- clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
- clock-names = "cam_clk";
- clock-cntl-level = "turbo";
- clock-rates = <24000000>;
- };
-
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 7928ab5..108eda5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -562,6 +562,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ status = "disabled";
+
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp-overlay.dts
index 32a8580..5eb7919 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp-overlay.dts
@@ -31,3 +31,7 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
+
+&tavil_snd {
+ qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp.dts
index 6a87d3a..88beca9 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-external-codec-cdp.dts
@@ -25,3 +25,7 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
+
+&tavil_snd {
+ qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp-overlay.dts
index 48a6066..2aa8512 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp-overlay.dts
@@ -33,3 +33,6 @@
<0x0001001b 0x0202001a 0x0 0x0>;
};
+&tavil_snd {
+ qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp.dts
index e64d13b..43198bb 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-external-codec-pm660a-cdp.dts
@@ -26,3 +26,7 @@
<0x0001001b 0x0002001a 0x0 0x0>,
<0x0001001b 0x0202001a 0x0 0x0>;
};
+
+&tavil_snd {
+ qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 9e75ee0..f287b21 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -58,6 +58,7 @@
qcom,initial-pwrlevel = <3>;
qcom,gpu-quirk-hfi-use-reg;
+ qcom,gpu-quirk-limit-uche-gbif-rw;
/* <HZ/12> */
qcom,idle-timeout = <80>;
@@ -117,7 +118,7 @@
cache-slices = <&llcc 12>, <&llcc 11>;
/* CPU latency parameter */
- qcom,pm-qos-active-latency = <914>;
+ qcom,pm-qos-active-latency = <899>;
qcom,pm-qos-wakeup-latency = <899>;
/* Enable context aware freq. scaling */
@@ -134,6 +135,8 @@
#size-cells = <0>;
compatible = "qcom,gpu-coresight";
+ status = "disabled";
+
qcom,gpu-coresight@0 {
reg = <0>;
coresight-name = "coresight-gfx";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
index df10e7d..cb0a386 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
@@ -14,4 +14,5 @@
&int_codec {
qcom,msm-mbhc-usbc-audio-supported = <1>;
qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,us-euro-gpios = <&wcd_gnd_mic_swap_gpio>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
index 61ef7ff..3fd1229 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,18 +21,18 @@
qcom,ion-heap-type = "SYSTEM";
};
- qcom,ion-heap@22 { /* ADSP HEAP */
- reg = <22>;
- memory-region = <&adsp_mem>;
- qcom,ion-heap-type = "DMA";
- };
-
qcom,ion-heap@27 { /* QSEECOM HEAP */
reg = <27>;
memory-region = <&qseecom_mem>;
qcom,ion-heap-type = "DMA";
};
+ qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+ reg = <19>;
+ memory-region = <&qseecom_ta_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
qcom,ion-heap@13 { /* SPSS HEAP */
reg = <13>;
memory-region = <&sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index ffed74c..9025d6b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1979,6 +1979,19 @@
drive-strength = <2>; /* 2 MA */
};
};
+
+ nx30p6093_intr_default: nx30p6093_intr_default {
+ mux {
+ pins = "gpio5";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio5";
+ bias-disable;
+ input-enable;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index fe88aae..5bf8df7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -131,7 +131,6 @@
#size-cells = <0>;
qcom,psci-mode-shift = <0>;
qcom,psci-mode-mask = <0xf>;
- qcom,use-prediction;
qcom,cpu = <&CPU6 &CPU7>;
qcom,pm-cpu-level@0 { /* C1 */
@@ -186,4 +185,8 @@
reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
reg-names = "phys_addr_base", "offset_addr";
};
+
+ qcom,rpmh-master-stats {
+ compatible = "qcom,rpmh-master-stats";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index de125e2..8a8d42fc 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -625,15 +625,35 @@
};
&dsi_sim_cmd {
- qcom,mdss-dsi-t-clk-post = <0x0d>;
- qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-t-clk-post = <0x0c>;
+ qcom,mdss-dsi-t-clk-pre = <0x29>;
qcom,mdss-dsi-display-timings {
timing@0{
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
- 07 05 03 04 00];
qcom,display-topology = <1 0 1>,
- <2 0 1>;
- qcom,default-topology-index = <0>;
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
+ };
+ timing@1{
+ qcom,display-topology = <1 0 1>,
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
+ };
+ timing@2{
+ qcom,display-topology = <1 0 1>,
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
};
};
};
@@ -709,8 +729,8 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-display-timings {
timing@0 {
- qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
- 05 03 04 00];
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
qcom,display-topology = <2 0 2>,
<1 0 2>;
qcom,default-topology-index = <0>;
@@ -724,8 +744,8 @@
qcom,ulps-enabled;
qcom,mdss-dsi-display-timings {
timing@0 {
- qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
- 05 03 04 00];
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
qcom,display-topology = <2 0 2>,
<1 0 2>;
qcom,default-topology-index = <0>;
@@ -767,13 +787,13 @@
};
&dsi_nt35695b_truly_fhd_cmd {
- qcom,mdss-dsi-t-clk-post = <0x07>;
- qcom,mdss-dsi-t-clk-pre = <0x1c>;
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,ulps-enabled;
qcom,mdss-dsi-display-timings {
timing@0 {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
- 05 07 05 03 04 00];
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22
+ 07 07 05 03 04 00];
qcom,display-topology = <1 0 1>;
qcom,default-topology-index = <0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index e321329..1de8f8f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -545,6 +545,14 @@
size = <0 0x1400000>;
};
+ qseecom_ta_mem: qseecom_ta_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1000000>;
+ };
+
sp_mem: sp_region { /* SPSS-HLOS ION shared mem */
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
@@ -745,6 +753,7 @@
compatible = "qcom,memshare-peripheral";
qcom,peripheral-size = <0x500000>;
qcom,client-id = <1>;
+ qcom,allocate-boot-time;
label = "modem";
};
};
@@ -2932,3 +2941,17 @@
&pm660_div_clk {
status = "ok";
};
+
+&qupv3_se10_i2c {
+ nx30p6093: nx30p6093@36 {
+ status = "disabled";
+ compatible = "nxp,nx30p6093";
+ reg = <0x36>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <5 IRQ_TYPE_NONE>;
+ nxp,long-wakeup-sec = <28800>; /* 8 hours */
+ nxp,short-wakeup-ms = <180000>; /* 3 mins */
+ pinctrl-names = "default";
+ pinctrl-0 = <&nx30p6093_intr_default>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index b24ef1d..ee10cfc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,4 +139,8 @@
reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
reg-names = "phys_addr_base", "offset_addr";
};
+
+ qcom,rpmh-master-stats {
+ compatible = "qcom,rpmh-master-stats";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 00f0650..0c1f097 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -132,6 +132,7 @@
vdd-hba-supply = <&ufs_phy_gdsc>;
vdd-hba-fixed-regulator;
vcc-supply = <&pm8998_l20>;
+ vcc-voltage-level = <2950000 2960000>;
vccq2-supply = <&pm8998_s4>;
vcc-max-microamp = <600000>;
vccq2-max-microamp = <600000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
index 7b8b425..a297835 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
@@ -330,4 +330,16 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+	/* WLAN - inbound SMP2P entry from the MSS/WLAN PD */
+ smp2pgpio_wlan_1_in: qcom,smp2pgpio-wlan-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "wlan";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1ce9f1f..213dfdb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -533,58 +533,82 @@
reg = <0 0x85fc0000 0 0x2f40000>;
};
- pil_camera_mem: camera_region@8ab00000 {
- compatible = "removed-dma-pool";
- no-map;
- reg = <0 0x8ab00000 0 0x500000>;
- };
-
- pil_adsp_mem: pil_adsp_region@8b100000 {
- compatible = "removed-dma-pool";
- no-map;
- reg = <0 0x8b100000 0 0x1a00000>;
- };
-
- wlan_fw_region: wlan_fw_region@8cb00000 {
+ qseecom_mem: qseecom_region@0x8ab00000 {
compatible = "shared-dma-pool";
- reg = <0 0x8cb00000 0 0x100000>;
+ no-map;
+ reg = <0 0x8ab00000 0 0x1400000>;
};
- pil_modem_mem: modem_region@8cc00000 {
+ pil_camera_mem: camera_region@0x8bf00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x8cc00000 0 0x7600000>;
+ reg = <0 0x8bf00000 0 0x500000>;
};
- pil_video_mem: pil_video_region@94200000 {
+	pil_ipa_fw_mem: ipa_fw_region@0x8c400000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x94200000 0 0x500000>;
+ reg = <0 0x8c400000 0 0x10000>;
};
- pil_cdsp_mem: cdsp_regions@94700000 {
+ pil_ipa_gsi_mem: ipa_gsi_region@0x8c410000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x94700000 0 0x800000>;
+ reg = <0 0x8c410000 0 0x5000>;
};
- pil_mba_mem: pil_mba_region@0x94f00000 {
+ pil_gpu_mem: gpu_region@0x8c415000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x94f00000 0 0x200000>;
+ reg = <0 0x8c415000 0 0x2000>;
};
- pil_slpi_mem: pil_slpi_region@95100000 {
+ pil_adsp_mem: adsp_region@0x8c500000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x95100000 0 0x1400000>;
+ reg = <0 0x8c500000 0 0x1a00000>;
};
-
- pil_spss_mem: spss_region@96500000 {
+ wlan_fw_region: wlan_fw_region@0x8df00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x96500000 0 0x100000>;
+ reg = <0 0x8df00000 0 0x100000>;
+ };
+
+ pil_modem_mem: modem_region@0x8e000000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8e000000 0 0x7800000>;
+ };
+
+ pil_video_mem: video_region@0x95800000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95800000 0 0x500000>;
+ };
+
+ pil_cdsp_mem: cdsp_region@0x95d00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x95d00000 0 0x800000>;
+ };
+
+ pil_mba_mem: mba_region@0x96500000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96500000 0 0x200000>;
+ };
+
+ pil_slpi_mem: slpi_region@0x96700000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96700000 0 0x1400000>;
+ };
+
+ pil_spss_mem: pil_spss_region@0x97b00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x97b00000 0 0x100000>;
};
adsp_mem: adsp_region {
@@ -595,14 +619,6 @@
size = <0 0x1000000>;
};
- qseecom_mem: qseecom_region {
- compatible = "shared-dma-pool";
- alloc-ranges = <0 0x00000000 0 0xffffffff>;
- no-map;
- alignment = <0 0x400000>;
- size = <0 0x1400000>;
- };
-
qseecom_ta_mem: qseecom_ta_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -2784,6 +2800,7 @@
qcom,pas-id = <0xf>;
qcom,firmware-name = "ipa_fws";
qcom,pil-force-shutdown;
+ memory-region = <&pil_ipa_fw_mem>;
};
qcom,chd_sliver {
@@ -3064,6 +3081,7 @@
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x100000>;
+ qcom,gpio-force-fatal-error = <&smp2pgpio_wlan_1_in 0 0>;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
vdd-1.8-xo-supply = <&pm8998_l7>;
@@ -3757,7 +3775,7 @@
};
fcm_dump {
- qcom,dump-size = <0x400>;
+ qcom,dump-size = <0x8400>;
qcom,dump-id = <0xee>;
};
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index e99c988..de90d43 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -53,6 +53,7 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
CONFIG_ARCH_SDM450=y
+CONFIG_ARCH_SDM632=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
@@ -315,6 +316,7 @@
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
@@ -398,9 +400,20 @@
CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_PARANOID_SD_INIT=y
@@ -433,11 +446,9 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_GSI=y
-CONFIG_IPA3=y
-CONFIG_RMNET_IPA3=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
-CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
@@ -459,6 +470,7 @@
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index e72d0b7..8145f47 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -57,6 +57,7 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
CONFIG_ARCH_SDM450=y
+CONFIG_ARCH_SDM632=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
@@ -325,6 +326,7 @@
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
@@ -409,9 +411,20 @@
CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_RING_BUFFER=y
@@ -445,9 +458,8 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_GSI=y
-CONFIG_IPA3=y
-CONFIG_RMNET_IPA3=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -475,6 +487,7 @@
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index e8fe5bc..1904209 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -21,8 +21,6 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
@@ -39,7 +37,6 @@
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
@@ -326,6 +323,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_NX30P6093=y
CONFIG_QPNP_FG_GEN3=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_SMB2=y
@@ -512,6 +510,7 @@
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QPNP_PBS=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index ca923f1..670627d 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -22,8 +22,6 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
@@ -41,7 +39,6 @@
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
@@ -332,6 +329,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_NX30P6093=y
CONFIG_QPNP_FG_GEN3=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_SMB2=y
@@ -528,6 +526,7 @@
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QPNP_PBS=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 357a6b2..0ff77bd 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -37,7 +37,6 @@
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
@@ -438,7 +437,7 @@
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
+CONFIG_MMC_TEST=m
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index d0a32e7..c9c1f28 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -38,7 +38,6 @@
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
@@ -441,7 +440,7 @@
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
+CONFIG_MMC_TEST=m
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 9f2bf90..6615189 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -85,14 +85,12 @@
uint8_t reg_val = 0, en;
uint8_t rxport_num = 0;
uint16_t reg;
+ uint8_t prev_reg_val = 0;
BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
BTFMSLIM_DBG("sample rate is %d", btfmslim->sample_rate);
- if (enable &&
- btfmslim->sample_rate != 44100 &&
- btfmslim->sample_rate != 88200) {
- BTFMSLIM_DBG("setting multichannel bit");
+ if (enable) {
/* For SCO Rx, A2DP Rx other than 44.1 and 88.2Khz */
if (port_num < 24) {
rxport_num = port_num - 16;
@@ -106,6 +104,21 @@
rxport_num);
}
+ if (btfmslim->sample_rate == 44100 ||
+ btfmslim->sample_rate == 88200) {
+ BTFMSLIM_DBG("unsetting multichannel bit");
+ ret = btfm_slim_read(btfmslim, reg, 1,
+ &prev_reg_val, IFD);
+ if (ret < 0) {
+ BTFMSLIM_ERR("error %d reading", ret);
+ prev_reg_val = 0;
+ }
+ BTFMSLIM_DBG("prev_reg_val (%d) from reg(%x)",
+ prev_reg_val, reg);
+ reg_val = prev_reg_val & ~reg_val;
+ } else
+ BTFMSLIM_DBG("setting multichannel bit");
+
BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
reg_val, reg);
ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index e65b493..c31998c 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -205,6 +205,8 @@
new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
if (new_mask != DIAG_CON_NONE)
*req_mode = DIAG_MULTI_MODE;
+ if (new_mask == DIAG_CON_ALL)
+ *req_mode = DIAG_MEMORY_DEVICE_MODE;
break;
case DIAG_MEMORY_DEVICE_MODE:
new_mask = (*peripheral_mask) | diag_mux->mux_mask;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index a1c9d68..0158549 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1656,7 +1656,9 @@
}
}
}
- if (!param->diag_id) {
+ if (!param->diag_id ||
+ (param->pd_val < UPD_WLAN) ||
+ (param->pd_val > NUM_MD_SESSIONS)) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag_id support is not present for the pd mask = %d\n",
param->pd_mask);
@@ -1669,19 +1671,19 @@
param->peripheral, param->pd_val);
peripheral = param->peripheral;
+ i = param->pd_val - UPD_WLAN;
if (driver->md_session_map[peripheral] &&
(MD_PERIPHERAL_MASK(peripheral) &
- diag_mux->mux_mask)) {
+ diag_mux->mux_mask) &&
+ !driver->pd_session_clear[i]) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag_fr: User PD is already logging onto active peripheral logging\n");
- i = param->pd_val - UPD_WLAN;
driver->pd_session_clear[i] = 0;
return -EINVAL;
}
peripheral_mask =
diag_translate_mask(param->pd_mask);
param->peripheral_mask = peripheral_mask;
- i = param->pd_val - UPD_WLAN;
if (!driver->pd_session_clear[i]) {
driver->pd_logging_mode[i] = 1;
driver->num_pd_session += 1;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f157a2f..83f44ce 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1053,8 +1053,11 @@
write_len = diag_send_data(reg_item, buf, len);
} else {
if (MD_PERIPHERAL_MASK(reg_item->proc) &
- driver->logging_mask)
+ driver->logging_mask) {
+ mutex_unlock(&driver->cmd_reg_mutex);
diag_send_error_rsp(buf, len, info);
+ return write_len;
+ }
else
write_len = diag_send_data(reg_item, buf, len);
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 433f768..da0e81d 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -122,10 +122,12 @@
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
#define IPC_LOG_PAGES (40)
#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
+#define CMD_TIMEOUT_MS (1000)
#else
#define IPC_LOG_PAGES (2)
#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
+#define CMD_TIMEOUT_MS (250)
#endif
#define GPI_LABEL_SIZE (256)
@@ -133,7 +135,6 @@
#define MAX_CHANNELS_PER_GPII (2)
#define GPI_TX_CHAN (0)
#define GPI_RX_CHAN (1)
-#define CMD_TIMEOUT_MS (50)
#define STATE_IGNORE (U32_MAX)
#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index fe00bea..ca227e8 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -45,6 +45,8 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+#define MAX_DRM_OPEN_COUNT 20
+
/**
* DOC: file operations
*
@@ -135,6 +137,11 @@
if (!dev->open_count++)
need_setup = 1;
+ if (dev->open_count >= MAX_DRM_OPEN_COUNT) {
+ retcode = -EPERM;
+ goto err_undo;
+ }
+
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 9b79a5b..174057b 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -26,6 +26,9 @@
#include "drm_crtc_internal.h"
+#define MAX_BLOB_PROP_SIZE (PAGE_SIZE * 30)
+#define MAX_BLOB_PROP_COUNT 250
+
/**
* DOC: overview
*
@@ -554,7 +557,8 @@
struct drm_property_blob *blob;
int ret;
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+ if (!length || length > MAX_BLOB_PROP_SIZE -
+ sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = vzalloc(sizeof(struct drm_property_blob)+length);
@@ -756,13 +760,20 @@
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_blob *out_resp = data;
- struct drm_property_blob *blob;
+ struct drm_property_blob *blob, *bt;
void __user *blob_ptr;
int ret = 0;
+ u32 count = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ list_for_each_entry(bt, &file_priv->blobs, head_file)
+ count++;
+
+ if (count == MAX_BLOB_PROP_COUNT)
+ return -EINVAL;
+
blob = drm_property_create_blob(dev, out_resp->length, NULL);
if (IS_ERR(blob))
return PTR_ERR(blob);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 6342fef..0b3d903 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -345,12 +345,24 @@
return len;
}
+static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
+{
+ if (rc >= *max_size) {
+ pr_err("buffer overflow\n");
+ return -EINVAL;
+ }
+ *len += rc;
+ *max_size = SZ_4K - *len;
+
+ return 0;
+}
+
static ssize_t dp_debug_read_edid_modes(struct file *file,
char __user *user_buff, size_t count, loff_t *ppos)
{
struct dp_debug_private *debug = file->private_data;
char *buf;
- u32 len = 0;
+ u32 len = 0, ret = 0, max_size = SZ_4K;
int rc = 0;
struct drm_connector *connector;
struct drm_display_mode *mode;
@@ -380,12 +392,12 @@
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
- len += snprintf(buf + len, SZ_4K - len,
- "%s %d %d %d %d %d %d %d %d %d %d 0x%x\n",
+ ret = snprintf(buf + len, max_size,
+ "%s %d %d %d %d %d 0x%x\n",
mode->name, mode->vrefresh, mode->picture_aspect_ratio,
- mode->hdisplay, mode->hsync_start, mode->hsync_end,
- mode->htotal, mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->flags);
+ mode->htotal, mode->vtotal, mode->clock, mode->flags);
+ if (dp_debug_check_buffer_overflow(ret, &max_size, &len))
+ break;
}
mutex_unlock(&connector->dev->mode_config.mutex);
@@ -403,18 +415,6 @@
return rc;
}
-static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
-{
- if (rc >= *max_size) {
- pr_err("buffer overflow\n");
- return -EINVAL;
- }
- *len += rc;
- *max_size = SZ_4K - *len;
-
- return 0;
-}
-
static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
size_t count, loff_t *ppos)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 3ddc499..2bd3bd4 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -352,11 +352,15 @@
pd->vdo = *vdos;
dp_usbpd_get_status(pd);
+ if (!pd->dp_usbpd.alt_mode_cfg_done) {
+ if (pd->dp_usbpd.port & BIT(1))
+ dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
+ break;
+ }
+
if (pd->dp_cb && pd->dp_cb->attention)
pd->dp_cb->attention(pd->dev);
- if (!pd->dp_usbpd.alt_mode_cfg_done)
- dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
break;
case DP_USBPD_VDM_STATUS:
pd->vdo = *vdos;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 5635346..c1ea8dd 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -111,13 +111,13 @@
regs->vregs[i].post_off_sleep = tmp;
}
- ++i;
pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
regs->vregs[i].vreg_name,
regs->vregs[i].min_voltage,
regs->vregs[i].max_voltage,
regs->vregs[i].enable_load,
regs->vregs[i].disable_load);
+ ++i;
}
error:
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 8e85c81..461868d 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -418,24 +418,29 @@
} else {
/* extra handling for incoming properties */
mutex_lock(&info->property_lock);
- if (val && (property->flags & DRM_MODE_PROP_BLOB) &&
+ if ((property->flags & DRM_MODE_PROP_BLOB) &&
(property_idx < info->blob_count)) {
+
+ /* need to clear previous ref */
+ if (property_state->values[property_idx].blob)
+ drm_property_unreference_blob(
+ property_state->values[
+ property_idx].blob);
+
/* DRM lookup also takes a reference */
blob = drm_property_lookup_blob(info->dev,
(uint32_t)val);
- if (!blob) {
+ if (val && !blob) {
DRM_ERROR("prop %d blob id 0x%llx not found\n",
property_idx, val);
val = 0;
} else {
- DBG("Blob %u saved", blob->base.id);
- val = blob->base.id;
+ if (blob) {
+ DBG("Blob %u saved", blob->base.id);
+ val = blob->base.id;
+ }
- /* save blob - need to clear previous ref */
- if (property_state->values[property_idx].blob)
- drm_property_unreference_blob(
- property_state->values[
- property_idx].blob);
+ /* save the new blob */
property_state->values[property_idx].blob =
blob;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 5a646e9..06f004c 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -58,7 +58,7 @@
* enum ad_intr_resp_property - ad4 interrupt response enum
*/
enum ad_intr_resp_property {
- AD4_BACKLIGHT,
+ AD4_IN_OUT_BACKLIGHT,
AD4_RESPMAX,
};
@@ -92,8 +92,10 @@
* sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
* @dspp: pointer to dspp object
* @event: event for which response is needed
- * @resp: value of event requested
+ * @resp_in: read ad4 input value of event requested
+ * @resp_out: read ad4 output value of event requested
*/
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp);
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+ u32 *resp_in, u32 *resp_out);
#endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 07b5536..42aea7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -24,6 +24,7 @@
#include "sde_ad4.h"
#include "sde_hw_interrupts.h"
#include "sde_core_irq.h"
+#include "dsi_panel.h"
struct sde_cp_node {
u32 property_id;
@@ -1575,7 +1576,8 @@
static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
{
- uint32_t bl = 0;
+ uint32_t input_bl = 0, output_bl = 0;
+ uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
struct sde_hw_mixer *hw_lm = NULL;
struct sde_hw_dspp *hw_dspp = NULL;
u32 num_mixers;
@@ -1598,11 +1600,17 @@
if (!hw_dspp)
return;
- hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
+ hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_IN_OUT_BACKLIGHT,
+ &input_bl, &output_bl);
+
+ if (!input_bl || input_bl < output_bl)
+ return;
+
+ scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
event.length = sizeof(u32);
event.type = DRM_EVENT_AD_BACKLIGHT;
msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
- &event, (u8 *)&bl);
+ &event, (u8 *)&scale);
}
int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 08b3657..cfe4419 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -426,20 +426,6 @@
}
}
-void sde_connector_helper_bridge_disable(struct drm_connector *connector)
-{
- int rc;
-
- if (!connector)
- return;
-
- /* trigger a final connector pre-kickoff for power mode updates */
- rc = sde_connector_pre_kickoff(connector);
- if (rc)
- SDE_ERROR("conn %d final pre kickoff failed %d\n",
- connector->base.id, rc);
-}
-
static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
{
struct drm_connector *connector;
@@ -530,12 +516,12 @@
return rc;
}
-int sde_connector_pre_kickoff(struct drm_connector *connector)
+static int _sde_connector_update_dirty_properties(
+ struct drm_connector *connector)
{
struct sde_connector *c_conn;
struct sde_connector_state *c_state;
- struct msm_display_kickoff_params params;
- int idx, rc;
+ int idx;
if (!connector) {
SDE_ERROR("invalid argument\n");
@@ -545,11 +531,6 @@
c_conn = to_sde_connector(connector);
c_state = to_sde_connector_state(connector->state);
- if (!c_conn->display) {
- SDE_ERROR("invalid argument\n");
- return -EINVAL;
- }
-
while ((idx = msm_property_pop_dirty(&c_conn->property_info,
&c_state->property_state)) >= 0) {
switch (idx) {
@@ -576,6 +557,34 @@
c_conn->bl_scale_dirty = false;
}
+ return 0;
+}
+
+int sde_connector_pre_kickoff(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ struct msm_display_kickoff_params params;
+ int rc;
+
+ if (!connector) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(connector->state);
+ if (!c_conn->display) {
+ SDE_ERROR("invalid connector display\n");
+ return -EINVAL;
+ }
+
+ rc = _sde_connector_update_dirty_properties(connector);
+ if (rc) {
+ SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
+ goto end;
+ }
+
if (!c_conn->ops.pre_kickoff)
return 0;
@@ -586,9 +595,25 @@
rc = c_conn->ops.pre_kickoff(connector, c_conn->display, ¶ms);
+end:
return rc;
}
+void sde_connector_helper_bridge_disable(struct drm_connector *connector)
+{
+ int rc;
+
+ if (!connector)
+ return;
+
+ rc = _sde_connector_update_dirty_properties(connector);
+ if (rc) {
+ SDE_ERROR("conn %d final pre kickoff failed %d\n",
+ connector->base.id, rc);
+ SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
+ }
+}
+
int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
{
struct sde_connector *c_conn;
@@ -1791,7 +1816,7 @@
struct sde_connector *c_conn = NULL;
struct sde_connector_state *sde_conn_state = NULL;
struct msm_mode_info mode_info;
- struct drm_property_blob *blob = NULL;
+ struct drm_property_blob **blob = NULL;
int rc = 0;
c_conn = to_sde_connector(conn);
@@ -1835,7 +1860,7 @@
}
}
- blob = c_conn->blob_caps;
+ blob = &c_conn->blob_caps;
break;
case CONNECTOR_PROP_MODE_INFO:
rc = sde_connector_populate_mode_info(conn, info);
@@ -1845,7 +1870,7 @@
rc);
goto exit;
}
- blob = c_conn->blob_mode_info;
+ blob = &c_conn->blob_mode_info;
break;
default:
SDE_ERROR_CONN(c_conn, "invalid prop_id: %d\n", prop_id);
@@ -1853,7 +1878,7 @@
};
msm_property_set_blob(&c_conn->property_info,
- &blob,
+ blob,
SDE_KMS_INFO_DATA(info),
SDE_KMS_INFO_DATALEN(info),
prop_id);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cc9d220..7e11fea 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1404,7 +1404,8 @@
}
static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
- struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+ struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
+ struct sde_crtc_mixer *mixer)
{
struct drm_plane *plane;
struct drm_framebuffer *fb;
@@ -1424,7 +1425,7 @@
bool bg_alpha_enable = false;
u32 prefill = 0;
- if (!sde_crtc || !mixer) {
+ if (!sde_crtc || !crtc->state || !mixer) {
SDE_ERROR("invalid sde_crtc or mixer\n");
return;
}
@@ -1435,7 +1436,9 @@
cstate = to_sde_crtc_state(crtc->state);
cstate->sbuf_prefill_line = 0;
- sde_crtc->sbuf_flush_mask = 0x0;
+ sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask_all;
+ sde_crtc->sbuf_flush_mask_all = 0x0;
+ sde_crtc->sbuf_flush_mask_delta = 0x0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
@@ -1457,7 +1460,10 @@
sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
/* save sbuf flush value for later */
- sde_crtc->sbuf_flush_mask |= flush_sbuf;
+ if (old_state && drm_atomic_get_existing_plane_state(
+ old_state->state, plane))
+ sde_crtc->sbuf_flush_mask_delta |= flush_sbuf;
+ sde_crtc->sbuf_flush_mask_all |= flush_sbuf;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
@@ -1584,8 +1590,11 @@
/**
* _sde_crtc_blend_setup - configure crtc mixers
* @crtc: Pointer to drm crtc structure
+ * @old_state: Pointer to old crtc state
+ * @add_planes: Whether or not to add planes to mixers
*/
-static void _sde_crtc_blend_setup(struct drm_crtc *crtc, bool add_planes)
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state, bool add_planes)
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_crtc_state;
@@ -1632,7 +1641,7 @@
memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
if (add_planes)
- _sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+ _sde_crtc_blend_setup_mixer(crtc, old_state, sde_crtc, mixer);
for (i = 0; i < sde_crtc->num_mixers; i++) {
const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
@@ -3198,7 +3207,7 @@
if (unlikely(!sde_crtc->num_mixers))
return;
- _sde_crtc_blend_setup(crtc, true);
+ _sde_crtc_blend_setup(crtc, old_state, true);
_sde_crtc_dest_scaler_setup(crtc);
/* cancel the idle notify delayed work */
@@ -3422,23 +3431,29 @@
sde_crtc = to_sde_crtc(crtc);
/*
- * Update sbuf configuration and flush bits if a flush
- * mask has been defined for either the current or
- * previous commit.
+ * Update sbuf configuration and flush bits if either the rot_op_mode
+ * is different or a rotator commit was performed.
*
- * Updates are also required for the first commit after
- * sbuf_flush_mask becomes 0x0, to properly transition
- * the hardware out of sbuf mode.
+ * In the case where the rot_op_mode has changed, further require that
+ * the transition is either to or from offline mode unless
+ * sbuf_flush_mask_delta is also non-zero (i.e., a corresponding plane
+ * update was provided to the current commit).
*/
- if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
- return 0;
+ flush_mask = sde_crtc->sbuf_flush_mask_delta;
+ if ((sde_crtc->sbuf_op_mode_old != cstate->sbuf_cfg.rot_op_mode) &&
+ (sde_crtc->sbuf_op_mode_old == SDE_CTL_ROT_OP_MODE_OFFLINE ||
+ cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE))
+ flush_mask |= sde_crtc->sbuf_flush_mask_all |
+ sde_crtc->sbuf_flush_mask_old;
- flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
- sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
+ if (!flush_mask &&
+ cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+ return 0;
SDE_ATRACE_BEGIN("crtc_kickoff_rot");
- if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
+ if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE &&
+ sde_crtc->sbuf_flush_mask_delta) {
drm_atomic_crtc_for_each_plane(plane, crtc) {
rc = sde_plane_kickoff_rot(plane);
if (rc) {
@@ -3474,12 +3489,16 @@
/* explicitly trigger rotator for async modes */
if (cstate->sbuf_cfg.rot_op_mode ==
SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
- master_ctl->ops.trigger_rot_start) {
+ master_ctl->ops.trigger_rot_start)
master_ctl->ops.trigger_rot_start(master_ctl);
- SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0);
- }
+ SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0,
+ sde_crtc->sbuf_flush_mask_all,
+ sde_crtc->sbuf_flush_mask_delta);
}
+ /* save this in sde_crtc for next commit cycle */
+ sde_crtc->sbuf_op_mode_old = cstate->sbuf_cfg.rot_op_mode;
+
SDE_ATRACE_END("crtc_kickoff_rot");
return rc;
}
@@ -3492,13 +3511,14 @@
{
struct sde_crtc_mixer *mixer;
struct sde_hw_ctl *ctl;
- u32 i, flush_mask;
+ u32 i, n, flush_mask;
if (!sde_crtc)
return;
mixer = sde_crtc->mixers;
- for (i = 0; i < sde_crtc->num_mixers; i++) {
+ n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+ for (i = 0; i < n; i++) {
ctl = mixer[i].hw_ctl;
if (!ctl || !ctl->ops.get_pending_flush ||
!ctl->ops.clear_pending_flush ||
@@ -3524,16 +3544,19 @@
{
struct drm_plane *plane_halt[MAX_PLANES];
struct drm_plane *plane;
+ struct drm_encoder *encoder;
const struct drm_plane_state *pstate;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct sde_hw_ctl *ctl;
enum sde_ctl_rot_op_mode old_rot_op_mode;
- signed int i, plane_count;
+ signed int i, n, plane_count;
int rc;
- if (!crtc || !old_state)
+ if (!crtc || !crtc->dev || !old_state || !crtc->state)
return -EINVAL;
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode;
SDE_EVT32(DRMID(crtc), old_rot_op_mode,
@@ -3545,7 +3568,8 @@
/* optionally generate a panic instead of performing a h/w reset */
SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic");
- for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+ for (i = 0; i < n; ++i) {
ctl = sde_crtc->mixers[i].hw_ctl;
if (!ctl || !ctl->ops.reset)
continue;
@@ -3570,14 +3594,13 @@
* depending on the rotation mode; don't handle this for now
* and just force a hard reset in those cases.
*/
- if (i == sde_crtc->num_mixers &&
- old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+ if (i == n && old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
return false;
SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
/* force all components in the system into reset at the same time */
- for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ for (i = 0; i < n; ++i) {
ctl = sde_crtc->mixers[i].hw_ctl;
if (!ctl || !ctl->ops.hard_reset)
continue;
@@ -3613,11 +3636,26 @@
sde_plane_reset_rot(plane, (struct drm_plane_state *)pstate);
}
+ /* provide safe "border color only" commit configuration for later */
+ cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+ _sde_crtc_commit_kickoff_rot(crtc, cstate);
+ _sde_crtc_remove_pipe_flush(sde_crtc);
+ _sde_crtc_blend_setup(crtc, old_state, false);
+
/* take h/w components out of reset */
for (i = plane_count - 1; i >= 0; --i)
sde_plane_halt_requests(plane_halt[i], false);
- for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ /* attempt to poll for start of frame cycle before reset release */
+ list_for_each_entry(encoder,
+ &crtc->dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ sde_encoder_poll_line_counts(encoder);
+ }
+
+ for (i = 0; i < n; ++i) {
ctl = sde_crtc->mixers[i].hw_ctl;
if (!ctl || !ctl->ops.hard_reset)
continue;
@@ -3625,6 +3663,15 @@
ctl->ops.hard_reset(ctl, false);
}
+ list_for_each_entry(encoder,
+ &crtc->dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ sde_encoder_kickoff(encoder, false);
+ }
+
return -EAGAIN;
}
@@ -3649,7 +3696,7 @@
cstate = to_sde_crtc_state(crtc->state);
/* default to ASYNC mode for inline rotation */
- cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+ cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask_all ?
SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
@@ -3749,11 +3796,6 @@
if (_sde_crtc_reset_hw(crtc, old_state,
!sde_crtc->reset_request))
is_error = true;
-
- /* force offline rotation mode since the commit has no pipes */
- if (is_error)
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_OFFLINE;
}
sde_crtc->reset_request = reset_req;
@@ -3799,7 +3841,7 @@
if (is_error) {
_sde_crtc_remove_pipe_flush(sde_crtc);
- _sde_crtc_blend_setup(crtc, false);
+ _sde_crtc_blend_setup(crtc, old_state, false);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -4612,7 +4654,7 @@
/* identify attached planes that are not in the delta state */
if (!drm_atomic_get_existing_plane_state(state->state, plane)) {
- rc = sde_plane_confirm_hw_rsvps(plane, pstate);
+ rc = sde_plane_confirm_hw_rsvps(plane, pstate, state);
if (rc) {
SDE_ERROR("crtc%d confirmation hw failed %d\n",
crtc->base.id, rc);
@@ -5364,7 +5406,7 @@
struct drm_plane_state *state;
struct sde_crtc_state *cstate;
- int i, out_width;
+ int i, out_width, out_height;
if (!s || !s->private)
return -EINVAL;
@@ -5376,6 +5418,7 @@
mutex_lock(&sde_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
out_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
+ out_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
@@ -5391,7 +5434,7 @@
else
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
- out_width, mode->vdisplay);
+ out_width, out_height);
}
seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 589a667..1de3675 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -215,8 +215,10 @@
* @misr_enable : boolean entry indicates misr enable/disable status.
* @misr_frame_count : misr frame count provided by client
* @misr_data : store misr data before turning off the clocks.
- * @sbuf_flush_mask: flush mask for inline rotator
+ * @sbuf_op_mode_old : inline rotator op mode for previous commit cycle
* @sbuf_flush_mask_old: inline rotator flush mask for previous commit
+ * @sbuf_flush_mask_all: inline rotator flush mask for all attached planes
+ * @sbuf_flush_mask_delta: inline rotator flush mask for current delta state
* @idle_notify_work: delayed worker to notify idle timeout to user space
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
@@ -284,8 +286,10 @@
u32 misr_frame_count;
u32 misr_data[CRTC_DUAL_MIXERS];
- u32 sbuf_flush_mask;
+ u32 sbuf_op_mode_old;
u32 sbuf_flush_mask_old;
+ u32 sbuf_flush_mask_all;
+ u32 sbuf_flush_mask_delta;
struct kthread_delayed_work idle_notify_work;
struct sde_power_event *power_event;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 4008115..7162b06 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -3443,6 +3443,51 @@
_sde_encoder_power_enable(sde_enc, false);
}
+int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc)
+{
+ static const uint64_t timeout_us = 50000;
+ static const uint64_t sleep_us = 20;
+ struct sde_encoder_virt *sde_enc;
+ ktime_t cur_ktime, exp_ktime;
+ uint32_t line_count, tmp, i;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!sde_enc->cur_master ||
+ !sde_enc->cur_master->ops.get_line_count) {
+ SDE_DEBUG_ENC(sde_enc, "can't get master line count\n");
+ SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
+ return -EINVAL;
+ }
+
+ exp_ktime = ktime_add_ms(ktime_get(), timeout_us / 1000);
+
+ line_count = sde_enc->cur_master->ops.get_line_count(
+ sde_enc->cur_master);
+
+ for (i = 0; i < (timeout_us * 2 / sleep_us); ++i) {
+ tmp = line_count;
+ line_count = sde_enc->cur_master->ops.get_line_count(
+ sde_enc->cur_master);
+ if (line_count < tmp) {
+ SDE_EVT32(DRMID(drm_enc), line_count);
+ return 0;
+ }
+
+ cur_ktime = ktime_get();
+ if (ktime_compare_safe(exp_ktime, cur_ktime) <= 0)
+ break;
+
+ usleep_range(sleep_us / 2, sleep_us);
+ }
+
+ SDE_EVT32(DRMID(drm_enc), line_count, SDE_EVTLOG_ERROR);
+ return -ETIMEDOUT;
+}
+
int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
struct sde_encoder_kickoff_params *params)
{
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 937bd18..8038eb6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -108,6 +108,13 @@
struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
/**
+ * sde_encoder_poll_line_counts - poll encoder line counts for start of frame
+ * @encoder: encoder pointer
+ * @Returns: zero on success, negative error code on failure
+ */
+int sde_encoder_poll_line_counts(struct drm_encoder *encoder);
+
+/**
* sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
* path (i.e. ctl flush and start) at next appropriate time.
* Immediately: if no previous commit is outstanding.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 7ba9ec9..53c8dfb 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -179,10 +179,21 @@
SDE_ATRACE_BEGIN("pp_done_irq");
/* handle rare cases where the ctl_start_irq is not received */
- if (sde_encoder_phys_cmd_is_master(phys_enc)
- && atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0))
- phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+ if (sde_encoder_phys_cmd_is_master(phys_enc)) {
+ /*
+ * Reduce the refcount for the retire fence as well
+ * as for the ctl_start if the counters are greater
+ * than zero. If there was a retire fence count pending,
+ * then signal the RETIRE FENCE here.
+ */
+ if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
+ -1, 0))
+ phys_enc->parent_ops.handle_frame_done(
+ phys_enc->parent,
+ phys_enc,
+ SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+ }
/* notify all synchronous clients first, then asynchronous clients */
if (phys_enc->parent_ops.handle_frame_done)
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index bf48271..994bf3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -1248,16 +1248,20 @@
return 0;
}
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp)
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+ u32 *resp_in, u32 *resp_out)
{
- if (!dspp || !resp) {
- DRM_ERROR("invalid params dspp %pK resp %pK\n", dspp, resp);
+ if (!dspp || !resp_in || !resp_out) {
+ DRM_ERROR("invalid params dspp %pK resp_in %pK resp_out %pK\n",
+ dspp, resp_in, resp_out);
return;
}
switch (event) {
- case AD4_BACKLIGHT:
- *resp = SDE_REG_READ(&dspp->hw,
+ case AD4_IN_OUT_BACKLIGHT:
+ *resp_in = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x2c);
+ *resp_out = SDE_REG_READ(&dspp->hw,
dspp->cap->sblk->ad.base + 0x48);
break;
default:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 2b64165..2d2ac5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -159,10 +159,11 @@
/**
* ad_read_intr_resp - function to get interrupt response for ad
* @event: Event for which response needs to be read
- * @resp: Pointer to u32 where response value is dumped.
+ * @resp_in: Pointer to u32 where resp ad4 input value is dumped.
+ * @resp_out: Pointer to u32 where resp ad4 output value is dumped.
*/
void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
- u32 *resp);
+ u32 *resp_in, u32 *resp_out);
};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 0aa7650..638d05d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -1061,6 +1061,27 @@
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
+static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
+ int irq_idx, u32 irq_mask)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return;
+ }
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
int irq_idx)
{
@@ -1151,6 +1172,31 @@
return intr_status;
}
+static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ intr_status = SDE_REG_READ(&intr->hw,
+ sde_intr_set[reg_idx].status_off);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
ops->set_mask = sde_hw_intr_set_mask;
@@ -1166,8 +1212,11 @@
ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
+ ops->clear_intr_status_force_mask =
+ sde_hw_intr_clear_intr_status_force_mask;
ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
+ ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
}
static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index 0635b82..955029c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -206,6 +206,17 @@
int irq_idx);
/**
+ * clear_intr_status_force_mask() - clear HW interrupt status bits given in irq_mask
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @irq_mask: irq mask to clear
+ */
+ void (*clear_intr_status_force_mask)(
+ struct sde_hw_intr *intr,
+ int irq_idx,
+ u32 irq_mask);
+
+ /**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
* @intr: HW interrupt handle
@@ -229,6 +240,17 @@
bool clear);
/**
+ * get_intr_status_nomask - read raw HW interrupt status without applying the irq mask
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_intr_status_nomask)(
+ struct sde_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
* get_valid_interrupts - Gets a mask of all valid interrupt sources
* within SDE. These are actually status bits
* within interrupt registers that specify the
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index d39e3a8..d4d6998 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2862,12 +2862,20 @@
sde_kms->rm_init = true;
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+ rc = PTR_ERR(sde_kms->hw_intr);
+ SDE_ERROR("hw_intr init failed: %d\n", rc);
+ sde_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
/*
* Attempt continuous splash handoff only if reserved
* splash memory is found.
*/
if (sde_kms->splash_data.splash_base)
- sde_rm_cont_splash_res_init(&sde_kms->rm,
+ sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
&sde_kms->splash_data,
sde_kms->catalog);
@@ -2931,14 +2939,6 @@
goto perf_err;
}
- sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
- if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
- rc = PTR_ERR(sde_kms->hw_intr);
- SDE_ERROR("hw_intr init failed: %d\n", rc);
- sde_kms->hw_intr = NULL;
- goto hw_intr_init_err;
- }
-
/*
* _sde_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f2f870f..baad60a 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -182,7 +182,7 @@
return cstate;
}
-static bool sde_plane_enabled(struct drm_plane_state *state)
+static bool sde_plane_enabled(const struct drm_plane_state *state)
{
return state && state->fb && state->crtc;
}
@@ -2966,30 +2966,22 @@
}
int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
- const struct drm_plane_state *state)
+ const struct drm_plane_state *state,
+ struct drm_crtc_state *cstate)
{
- struct drm_crtc_state *cstate;
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
struct sde_hw_blk *hw_blk;
- if (!plane || !state) {
- SDE_ERROR("invalid plane/state\n");
+ if (!plane || !state || !cstate) {
+ SDE_ERROR("invalid parameters\n");
return -EINVAL;
}
pstate = to_sde_plane_state(state);
rstate = &pstate->rot;
- /* cstate will be null if crtc is disconnected from plane */
- cstate = _sde_plane_get_crtc_state((struct drm_plane_state *)state);
- if (IS_ERR_OR_NULL(cstate)) {
- SDE_ERROR("invalid crtc state\n");
- return -EINVAL;
- }
-
- if (sde_plane_enabled((struct drm_plane_state *)state) &&
- rstate->out_sbuf) {
+ if (sde_plane_enabled(state) && rstate->out_sbuf) {
SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
plane->base.id, rstate->sequence_id,
state->fb ? state->fb->base.id : -1);
@@ -3005,7 +2997,15 @@
SDE_EVTLOG_ERROR);
return -EINVAL;
}
+
+ _sde_plane_rot_get_fb(plane, cstate, rstate);
+
+ SDE_EVT32(DRMID(plane), rstate->sequence_id,
+ state->fb ? state->fb->base.id : -1,
+ rstate->out_fb ? rstate->out_fb->base.id : -1,
+ hw_blk->id);
}
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e8b621c..6666399 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -204,10 +204,12 @@
* sde_plane_confirm_hw_rsvps - reserve an sbuf resource, if needed
* @plane: Pointer to DRM plane object
* @state: Pointer to plane state
+ * @cstate: Pointer to crtc state containing the resource pool
* Returns: Zero on success
*/
int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
- const struct drm_plane_state *state);
+ const struct drm_plane_state *state,
+ struct drm_crtc_state *cstate);
/**
* sde_plane_get_ctl_flush - get control flush mask
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 1b71a82..f9092e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1130,22 +1130,62 @@
}
/**
+ * _sde_rm_poll_intr_status_for_cont_splash - poll HW interrupt status
+ * for the given lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @msec: Maximum time in milliseconds allowed to poll the intr status
+ * return: zero on success, -ETIMEDOUT if polling times out.
+ */
+static int _sde_rm_poll_intr_status_for_cont_splash
+ (struct sde_hw_intr *intr,
+ int irq_idx, u32 const msec)
+{
+ int i;
+ u32 status = 0;
+ u32 const delay_us = 500;
+ u32 const timeout_us = msec * 1000;
+ /* Make sure the status is checked at least once */
+ int loop = max((u32)1, (u32)(timeout_us / delay_us));
+
+ if (!intr)
+ return 0;
+
+ for (i = 0; i < loop; i++) {
+ status = intr->ops.get_intr_status_nomask
+ (intr, irq_idx, false);
+
+ if (status & BIT(irq_idx)) {
+ SDE_DEBUG(" Poll success. i=%d, status=0x%x\n",
+ i, status);
+ return 0;
+ }
+ usleep_range(delay_us, delay_us + 10);
+ }
+ SDE_ERROR("polling timed out. status = 0x%x\n", status);
+ return -ETIMEDOUT;
+}
+
+/**
* sde_rm_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks
* and disable autorefresh if enabled.
- * @mmio: mapped register io address of MDP
+ * @rm: Pointer to resource manager structure
+ * @sde_kms: Pointer to sde kms structure
* @max_dsc_cnt: number of DSC blocks supported in the hw
* @dsc_ids: pointer to store the active DSC block IDs
* return: number of active DSC blocks
*/
static int _sde_rm_get_pp_dsc_for_cont_splash(struct sde_rm *rm,
- int max_dsc_cnt, u8 *dsc_ids)
+ struct sde_kms *sde_kms,
+ int max_dsc_cnt, u8 *dsc_ids)
{
int index = 0;
int value, dsc_cnt = 0;
struct sde_hw_autorefresh cfg;
struct sde_rm_hw_iter iter_pp;
+ int irq_idx_pp_done = -1;
- if (!rm || !dsc_ids) {
+ if (!rm || !sde_kms || !dsc_ids) {
SDE_ERROR("invalid input parameters\n");
return 0;
}
@@ -1155,11 +1195,21 @@
while (_sde_rm_get_hw_locked(rm, &iter_pp)) {
struct sde_hw_pingpong *pp =
to_sde_hw_pingpong(iter_pp.blk->hw);
+ u32 intr_value = 0;
+ u32 const timeout_ms = 35; /* Max two vsyncs delay */
+ int rc = 0, i, loop = 2;
+ struct sde_hw_intr *hw_intr = NULL;
+ struct sde_hw_pp_vsync_info info;
if (!pp->ops.get_dsc_status) {
SDE_ERROR("get_dsc_status ops not initialized\n");
return 0;
}
+ hw_intr = sde_kms->hw_intr;
+ if (!hw_intr) {
+ SDE_ERROR("hw_intr handler not initialized\n");
+ return 0;
+ }
value = pp->ops.get_dsc_status(pp);
SDE_DEBUG("DSC[%d]=0x%x, dsc_cnt = %d\n",
index, value, dsc_cnt);
@@ -1177,14 +1227,61 @@
if (!pp->ops.get_autorefresh(pp, &cfg)
&& (cfg.enable)
&& (pp->ops.setup_autorefresh)) {
+ if (hw_intr->ops.irq_idx_lookup) {
+ irq_idx_pp_done = hw_intr->ops.irq_idx_lookup
+ (SDE_IRQ_TYPE_PING_PONG_COMP,
+ pp->idx);
+ SDE_DEBUG(" itr_idx = %d\n", irq_idx_pp_done);
+ }
+
+ if ((irq_idx_pp_done >= 0) &&
+ (hw_intr->ops.get_intr_status_nomask)) {
+ intr_value = hw_intr->ops.get_intr_status_nomask
+ (hw_intr, irq_idx_pp_done, false);
+ hw_intr->ops.clear_intr_status_force_mask
+ (hw_intr, irq_idx_pp_done, intr_value);
+ }
cfg.enable = false;
- SDE_DEBUG("Disabling autoreferesh\n");
+ SDE_DEBUG("Disabling autorefresh\n");
pp->ops.setup_autorefresh(pp, &cfg);
+
/*
- * Wait for one frame update so that
- * auto refresh disable is through
+ * Check the line count again if
+ * the line count is equal to the active
+ * height to make sure there are no
+ * additional frame updates
*/
- usleep_range(16000, 20000);
+ for (i = 0; i < loop; i++) {
+ info.wr_ptr_line_count = 0;
+ info.rd_ptr_init_val = 0;
+ if (pp->ops.get_vsync_info)
+ pp->ops.get_vsync_info(pp, &info);
+ /*
+ * For cmd-mode using external-TE logic,
+ * the rd_ptr_init_val is equal to
+ * active-height. Use this init_val to
+ * compare that with lane count. Need
+ * to implement a different check
+ * if external-TE is not used.
+ */
+ if (info.wr_ptr_line_count
+ < info.rd_ptr_init_val) {
+ /* wait for read ptr intr */
+ rc =
+ _sde_rm_poll_intr_status_for_cont_splash
+ (hw_intr, irq_idx_pp_done, timeout_ms);
+ if (!rc)
+ break;
+ }
+ SDE_DEBUG("i=%d, line count=%d\n",
+ i, info.wr_ptr_line_count);
+ /*
+ * Wait for a few milliseconds for the line count
+ * to increase if any frame transfer is
+ * pending.
+ */
+ usleep_range(3000, 4000);
+ }
}
}
@@ -1266,14 +1363,16 @@
top->dspp_sel, top->intf_sel);
}
-int sde_rm_cont_splash_res_init(struct sde_rm *rm,
+int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
+ struct sde_rm *rm,
struct sde_splash_data *splash_data,
struct sde_mdss_cfg *cat)
{
struct sde_rm_hw_iter iter_c;
int index = 0, ctl_top_cnt;
+ struct sde_kms *sde_kms = NULL;
- if (!rm || !cat || !splash_data) {
+ if (!priv || !rm || !cat || !splash_data) {
SDE_ERROR("invalid input parameters\n");
return -EINVAL;
}
@@ -1285,6 +1384,12 @@
ctl_top_cnt = cat->ctl_count;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
if (ctl_top_cnt > ARRAY_SIZE(splash_data->top)) {
SDE_ERROR("Mismatch in ctl_top array size\n");
return -EINVAL;
@@ -1318,6 +1423,7 @@
splash_data->dsc_cnt =
_sde_rm_get_pp_dsc_for_cont_splash(rm,
+ sde_kms,
cat->dsc_count,
splash_data->dsc_ids);
SDE_DEBUG("splash_data: ctl_top_cnt=%d, lm_cnt=%d, dsc_cnt=%d\n",
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index b2dd87d..11f4b6f 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -210,12 +210,14 @@
* sde_rm_cont_splash_res_init - Read the current MDSS configuration
* to update the splash data structure with the topology
* configured by the bootloader.
+ * @priv: DRM private structure handle
* @rm: SDE Resource Manager handle
* @splash_data: Pointer to the splash_data structure to be updated.
* @cat: Pointer to the SDE catalog
* @Return: 0 on success or error
*/
-int sde_rm_cont_splash_res_init(struct sde_rm *rm,
+int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
+ struct sde_rm *rm,
struct sde_splash_data *splash_data,
struct sde_mdss_cfg *cat);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 89453b0..5bfed6f 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -80,6 +80,7 @@
#define A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x8A6
#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x8A7
#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x8A8
+#define A6XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x8AB
#define A6XX_CP_PERFCTR_CP_SEL_0 0x8D0
#define A6XX_CP_PERFCTR_CP_SEL_1 0x8D1
#define A6XX_CP_PERFCTR_CP_SEL_2 0x8D2
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 08cd06b..26c1c39 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -348,7 +348,7 @@
.patchid = ANY_ID,
.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
- ADRENO_IOCOHERENT,
+ ADRENO_IOCOHERENT | ADRENO_PREEMPTION,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b8006b7..b8635e1 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -119,7 +119,6 @@
.skipsaverestore = 1,
.usesgmem = 1,
},
- .priv = BIT(ADRENO_DEVICE_PREEMPTION_EXECUTION),
};
/* Ptr to array for the current set of fault detect registers */
@@ -947,6 +946,8 @@
"qcom,gpu-quirk-lmloadkill-disable" },
{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
{ ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
+ { ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
+ "qcom,gpu-quirk-limit-uche-gbif-rw" },
};
static int adreno_of_get_power(struct adreno_device *adreno_dev,
@@ -1431,7 +1432,8 @@
}
- if (nopreempt == false) {
+ if (nopreempt == false &&
+ ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) {
int r = 0;
if (gpudev->preemption_init)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 0dd1921..269c3a9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -144,6 +144,12 @@
#define ADRENO_QUIRK_HFI_USE_REG BIT(6)
/* Only set protected SECVID registers once */
#define ADRENO_QUIRK_SECVID_SET_ONCE BIT(7)
+/*
+ * Limit number of read and write transactions from
+ * UCHE block to GBIF to avoid possible deadlock
+ * between GBIF, SMMU and MEMNOC.
+ */
+#define ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW BIT(8)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0
@@ -575,7 +581,6 @@
ADRENO_DEVICE_ISDB_ENABLED = 12,
ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
ADRENO_DEVICE_HARD_RESET = 14,
- ADRENO_DEVICE_PREEMPTION_EXECUTION = 15,
ADRENO_DEVICE_CORESIGHT_CX = 16,
};
@@ -650,6 +655,7 @@
ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
ADRENO_REG_RBBM_STATUS,
ADRENO_REG_RBBM_STATUS3,
ADRENO_REG_RBBM_PERFCTR_CTL,
@@ -1647,22 +1653,10 @@
smp_wmb();
}
-static inline bool adreno_is_preemption_execution_enabled(
- struct adreno_device *adreno_dev)
-{
- return test_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION, &adreno_dev->priv);
-}
-
-static inline bool adreno_is_preemption_setup_enabled(
- struct adreno_device *adreno_dev)
-{
- return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
-}
-
static inline bool adreno_is_preemption_enabled(
struct adreno_device *adreno_dev)
{
- return 0;
+ return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
}
/**
* adreno_ctx_get_rb() - Return the ringbuffer that a context should
@@ -1687,7 +1681,7 @@
* ringbuffer
*/
- if (!adreno_is_preemption_execution_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return &(adreno_dev->ringbuffers[0]);
/*
@@ -1938,13 +1932,15 @@
* Need to release CX Halt explicitly in case of SW_RESET.
* GX Halt release will be taken care by SW_RESET internally.
*/
- adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
- GBIF_HALT_REQUEST);
- ret = adreno_wait_for_vbif_halt_ack(device,
- ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
- VBIF_RESET_ACK_MASK);
- if (ret)
- return ret;
+ if (gpudev->gx_is_on(adreno_dev)) {
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
+ GBIF_HALT_REQUEST);
+ ret = adreno_wait_for_vbif_halt_ack(device,
+ ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+ VBIF_RESET_ACK_MASK);
+ if (ret)
+ return ret;
+ }
adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
ret = adreno_wait_for_vbif_halt_ack(device,
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4036530..d5da562 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -88,7 +88,7 @@
del_timer_sync(&adreno_dev->preempt.timer);
- trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb, 0);
/* Clean up all the bits */
adreno_dev->prev_rb = adreno_dev->cur_rb;
@@ -272,7 +272,8 @@
mod_timer(&adreno_dev->preempt.timer,
jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
- trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);
+ trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb,
+ 1);
adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
@@ -307,8 +308,7 @@
del_timer(&adreno_dev->preempt.timer);
- trace_adreno_preempt_done(adreno_dev->cur_rb,
- adreno_dev->next_rb);
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb, 0);
adreno_dev->prev_rb = adreno_dev->cur_rb;
adreno_dev->cur_rb = adreno_dev->next_rb;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 09d6a10..381bc3e 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -55,7 +55,6 @@
static const struct adreno_vbif_data a615_gbif[] = {
{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
- {A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9},
{0, 0},
};
@@ -363,6 +362,10 @@
{ A6XX_CP_AHB_CNTL, 0x0 },
};
+static struct reg_list_pair a615_ifpc_pwrup_reglist[] = {
+ { A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
+};
+
static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -584,6 +587,7 @@
uint32_t i;
struct cpu_gpu_lock *lock;
struct reg_list_pair *r;
+ uint16_t a615_list_size = 0;
/* Set up the register values */
for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
@@ -596,6 +600,19 @@
kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
}
+ if (adreno_is_a615(adreno_dev)) {
+ for (i = 0; i < ARRAY_SIZE(a615_ifpc_pwrup_reglist); i++) {
+ r = &a615_ifpc_pwrup_reglist[i];
+ kgsl_regread(KGSL_DEVICE(adreno_dev),
+ r->offset, &r->val);
+ }
+
+ a615_list_size = sizeof(a615_ifpc_pwrup_reglist);
+
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
+ a615_ifpc_pwrup_reglist, a615_list_size);
+ }
+
lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
lock->flag_ucode = 0;
lock->flag_kmd = 0;
@@ -614,13 +631,15 @@
* of the static IFPC-only register list.
*/
lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
- sizeof(a6xx_pwrup_reglist)) >> 2;
- lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
+ sizeof(a6xx_pwrup_reglist) + a615_list_size) >> 2;
+ lock->list_offset = (sizeof(a6xx_ifpc_pwrup_reglist) +
+ a615_list_size) >> 2;
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
+ + a615_list_size,
a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + sizeof(a6xx_ifpc_pwrup_reglist),
+ + sizeof(a6xx_ifpc_pwrup_reglist) + a615_list_size,
a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
}
@@ -650,6 +669,9 @@
adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
ARRAY_SIZE(a6xx_vbif_platforms));
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
+ kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);
+
/* Make all blocks contribute to the GPU BUSY perf counter */
kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
@@ -749,7 +771,7 @@
kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
/* Enable the GMEM save/restore feature for preemption */
- if (adreno_is_preemption_setup_enabled(adreno_dev))
+ if (adreno_is_preemption_enabled(adreno_dev))
kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
0x1);
@@ -999,7 +1021,7 @@
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (!adreno_is_preemption_execution_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return 0;
cmds = adreno_ringbuffer_allocspace(rb, 42);
@@ -2555,7 +2577,7 @@
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (adreno_is_preemption_execution_enabled(adreno_dev))
+ if (adreno_is_preemption_enabled(adreno_dev))
a6xx_preemption_trigger(adreno_dev);
adreno_dispatcher_schedule(device);
@@ -3626,6 +3648,8 @@
A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
+ A6XX_CP_CONTEXT_SWITCH_LEVEL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index d92d1e0..b9dd5f4 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -121,7 +121,10 @@
del_timer_sync(&adreno_dev->preempt.timer);
- trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
+ status);
/* Clean up all the bits */
adreno_dev->prev_rb = adreno_dev->cur_rb;
@@ -230,14 +233,13 @@
struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_ringbuffer *next;
uint64_t ttbr0, gpuaddr;
- unsigned int contextidr;
+ unsigned int contextidr, cntl;
unsigned long flags;
- uint32_t preempt_level, usesgmem, skipsaverestore;
struct adreno_preemption *preempt = &adreno_dev->preempt;
- preempt_level = preempt->preempt_level;
- usesgmem = preempt->usesgmem;
- skipsaverestore = preempt->skipsaverestore;
+ cntl = (((preempt->preempt_level << 6) & 0xC0) |
+ ((preempt->skipsaverestore << 9) & 0x200) |
+ ((preempt->usesgmem << 8) & 0x100) | 0x1);
/* Put ourselves into a possible trigger state */
if (!adreno_move_preempt_state(adreno_dev,
@@ -360,16 +362,13 @@
mod_timer(&adreno_dev->preempt.timer,
jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
- trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);
+ trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb,
+ cntl);
adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
/* Trigger the preemption */
- adreno_gmu_fenced_write(adreno_dev,
- ADRENO_REG_CP_PREEMPT,
- (((preempt_level << 6) & 0xC0) |
- ((skipsaverestore << 9) & 0x200) |
- ((usesgmem << 8) & 0x100) | 0x1),
+ adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
FENCE_STATUS_WRITEDROPPED1_MASK);
/*
@@ -408,8 +407,10 @@
del_timer(&adreno_dev->preempt.timer);
- trace_adreno_preempt_done(adreno_dev->cur_rb,
- adreno_dev->next_rb);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
+ status);
adreno_dev->prev_rb = adreno_dev->cur_rb;
adreno_dev->cur_rb = adreno_dev->next_rb;
@@ -431,7 +432,7 @@
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (!adreno_is_preemption_execution_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return;
mutex_lock(&device->mutex);
@@ -536,7 +537,7 @@
struct adreno_ringbuffer *rb;
unsigned int i;
- if (!adreno_is_preemption_execution_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return;
/* Force the state to be clear */
@@ -728,7 +729,7 @@
struct kgsl_device *device = context->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- if (!adreno_is_preemption_setup_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return;
gpumem_free_entry(context->user_ctxt_record);
@@ -743,7 +744,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
uint64_t flags = 0;
- if (!adreno_is_preemption_setup_enabled(adreno_dev))
+ if (!adreno_is_preemption_enabled(adreno_dev))
return 0;
if (context->flags & KGSL_CONTEXT_SECURE)
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 589417f..5572cd7 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -241,7 +241,7 @@
static const unsigned int a6xx_gbif_registers[] = {
/* GBIF */
- 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1,
+ 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
};
static const unsigned int a6xx_gmu_gx_registers[] = {
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 0caf55b..472f78e 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2141,7 +2141,7 @@
* Deleting uninitialized timer will block for ever on kernel debug
* disable build. Hence skip del timer if it is not initialized.
*/
- if (adreno_is_preemption_execution_enabled(adreno_dev))
+ if (adreno_is_preemption_enabled(adreno_dev))
del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 13d71982..aa8c2bf 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -96,7 +96,7 @@
int levels_to_copy;
if (!adreno_is_a5xx(adreno_dev) ||
- !adreno_is_preemption_execution_enabled(adreno_dev))
+ !adreno_is_preemption_enabled(adreno_dev))
return -EOPNOTSUPP;
if (read->size_user < size_level)
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 1a2f8ff..db6dff2 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -761,7 +761,7 @@
cmds = &link[0];
cmds += __add_curr_ctxt_cmds(rb, cmds, drawctxt);
- result = adreno_ringbuffer_issuecmds(rb, 0, link,
+ result = adreno_ringbuffer_issue_internal_cmds(rb, 0, link,
(unsigned int)(cmds - link));
return result;
}
@@ -834,7 +834,7 @@
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
- result = adreno_ringbuffer_issuecmds(rb,
+ result = adreno_ringbuffer_issue_internal_cmds(rb,
KGSL_CMD_FLAGS_PMODE, link,
(unsigned int)(cmds - link));
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 94fdbc2..5020750 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -823,8 +823,8 @@
*cmds++ = cp_register(adreno_dev, reg->select, 1);
*cmds++ = countable;
/* submit to highest priority RB always */
- ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_PMODE,
- buf, cmds-buf);
+ ret = adreno_ringbuffer_issue_internal_cmds(rb,
+ KGSL_CMD_FLAGS_PMODE, buf, cmds-buf);
if (ret)
return ret;
/*
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 01d9f71..52a35c4 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -37,6 +37,11 @@
#define RB_GPUADDR(_rb, _pos) \
((_rb)->buffer_desc.gpuaddr + ((_pos) * sizeof(unsigned int)))
+static inline bool is_internal_cmds(unsigned int flags)
+{
+ return (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+}
+
static void adreno_get_submit_time(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb,
struct adreno_submit_time *time)
@@ -260,7 +265,7 @@
return status;
}
- if (nopreempt == false)
+ if (nopreempt == false && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
adreno_dev->num_ringbuffers = gpudev->num_prio_levels;
else
adreno_dev->num_ringbuffers = 1;
@@ -389,7 +394,7 @@
struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+ !is_internal_cmds(flags))
return -ENOENT;
/* On fault return error so that we don't keep submitting */
@@ -399,7 +404,7 @@
rb->timestamp++;
/* If this is a internal IB, use the global timestamp for it */
- if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+ if (!drawctxt || is_internal_cmds(flags))
timestamp = rb->timestamp;
else {
context_id = drawctxt->base.id;
@@ -428,7 +433,7 @@
*/
profile_ready = drawctxt &&
adreno_profile_assignments_ready(&adreno_dev->profile) &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
+ !is_internal_cmds(flags);
/*
* reserve space to temporarily turn off protected mode
@@ -438,7 +443,7 @@
/* 2 dwords to store the start of command sequence */
total_sizedwords += 2;
/* internal ib command identifier for the ringbuffer */
- total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
+ total_sizedwords += is_internal_cmds(flags) ? 2 : 0;
total_sizedwords += (secured_ctxt) ? 26 : 0;
@@ -455,11 +460,11 @@
total_sizedwords += 4;
if (gpudev->preemption_pre_ibsubmit &&
- adreno_is_preemption_execution_enabled(adreno_dev))
+ adreno_is_preemption_enabled(adreno_dev))
total_sizedwords += 27;
if (gpudev->preemption_post_ibsubmit &&
- adreno_is_preemption_execution_enabled(adreno_dev))
+ adreno_is_preemption_enabled(adreno_dev))
total_sizedwords += 10;
/*
@@ -472,7 +477,7 @@
total_sizedwords += 8; /* sop timestamp */
total_sizedwords += 5; /* eop timestamp */
- if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ if (drawctxt && !is_internal_cmds(flags)) {
/* global timestamp without cache flush for non-zero context */
total_sizedwords += 4;
}
@@ -511,12 +516,12 @@
*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*ringcmds++ = KGSL_CMD_IDENTIFIER;
- if (adreno_is_preemption_execution_enabled(adreno_dev) &&
+ if (adreno_is_preemption_enabled(adreno_dev) &&
gpudev->preemption_pre_ibsubmit)
ringcmds += gpudev->preemption_pre_ibsubmit(
adreno_dev, rb, ringcmds, context);
- if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
+ if (is_internal_cmds(flags)) {
*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
}
@@ -553,7 +558,7 @@
&flags, &ringcmds);
/* start-of-pipeline timestamp for the context */
- if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+ if (drawctxt && !is_internal_cmds(flags))
ringcmds += cp_mem_write(adreno_dev, ringcmds,
MEMSTORE_ID_GPU_ADDR(device, context_id, soptimestamp),
timestamp);
@@ -627,12 +632,12 @@
* set and hence the rb timestamp will be used in else statement below.
*/
*ringcmds++ = cp_mem_packet(adreno_dev, CP_EVENT_WRITE, 3, 1);
- if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+ if (drawctxt || is_internal_cmds(flags))
*ringcmds++ = CACHE_FLUSH_TS | (1 << 31);
else
*ringcmds++ = CACHE_FLUSH_TS;
- if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ if (drawctxt && !is_internal_cmds(flags)) {
ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
MEMSTORE_ID_GPU_ADDR(device, context_id, eoptimestamp));
*ringcmds++ = timestamp;
@@ -669,7 +674,7 @@
ringcmds += cp_secure_mode(adreno_dev, ringcmds, 0);
if (gpudev->preemption_post_ibsubmit &&
- adreno_is_preemption_execution_enabled(adreno_dev))
+ adreno_is_preemption_enabled(adreno_dev))
ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
ringcmds);
@@ -693,7 +698,7 @@
}
int
-adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
+adreno_ringbuffer_issue_internal_cmds(struct adreno_ringbuffer *rb,
unsigned int flags,
unsigned int *cmds,
int sizedwords)
@@ -874,10 +879,9 @@
dwords += 2;
}
- if (adreno_is_preemption_execution_enabled(adreno_dev)) {
+ if (adreno_is_preemption_enabled(adreno_dev))
if (gpudev->preemption_yield_enable)
dwords += 8;
- }
/*
* Prior to SQE FW version 1.49, there was only one marker for
@@ -952,10 +956,9 @@
if (gpudev->ccu_invalidate)
cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
- if (adreno_is_preemption_execution_enabled(adreno_dev)) {
+ if (adreno_is_preemption_enabled(adreno_dev))
if (gpudev->preemption_yield_enable)
cmds += gpudev->preemption_yield_enable(cmds);
- }
if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index fbee627..1dfdb5b 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -158,7 +158,7 @@
void adreno_ringbuffer_close(struct adreno_device *adreno_dev);
-int adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
+int adreno_ringbuffer_issue_internal_cmds(struct adreno_ringbuffer *rb,
unsigned int flags,
unsigned int *cmdaddr,
int sizedwords);
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index e309ab0..2d2c9e5 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -223,14 +223,17 @@
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION,
- &adreno_dev->priv) == val)
- return 0;
-
mutex_lock(&device->mutex);
+ if (!(ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) ||
+ (test_bit(ADRENO_DEVICE_PREEMPTION,
+ &adreno_dev->priv) == val)) {
+ mutex_unlock(&device->mutex);
+ return 0;
+ }
+
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
- change_bit(ADRENO_DEVICE_PREEMPTION_EXECUTION, &adreno_dev->priv);
+ change_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
@@ -258,7 +261,7 @@
static unsigned int _preemption_show(struct adreno_device *adreno_dev)
{
- return adreno_is_preemption_execution_enabled(adreno_dev);
+ return adreno_is_preemption_enabled(adreno_dev);
}
static int _hwcg_store(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index e33060a..de028fa 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -573,34 +573,40 @@
);
TRACE_EVENT(adreno_preempt_trigger,
- TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
- TP_ARGS(cur, next),
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next,
+ unsigned int cntl),
+ TP_ARGS(cur, next, cntl),
TP_STRUCT__entry(
__field(struct adreno_ringbuffer *, cur)
__field(struct adreno_ringbuffer *, next)
+ __field(unsigned int, cntl)
),
TP_fast_assign(
__entry->cur = cur;
__entry->next = next;
+ __entry->cntl = cntl;
),
- TP_printk("trigger from id=%d to id=%d",
- __entry->cur->id, __entry->next->id
+ TP_printk("trigger from id=%d to id=%d cntl=%x",
+ __entry->cur->id, __entry->next->id, __entry->cntl
)
);
TRACE_EVENT(adreno_preempt_done,
- TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
- TP_ARGS(cur, next),
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next,
+ unsigned int level),
+ TP_ARGS(cur, next, level),
TP_STRUCT__entry(
__field(struct adreno_ringbuffer *, cur)
__field(struct adreno_ringbuffer *, next)
+ __field(unsigned int, level)
),
TP_fast_assign(
__entry->cur = cur;
__entry->next = next;
+ __entry->level = level;
),
- TP_printk("done switch to id=%d from id=%d",
- __entry->next->id, __entry->cur->id
+ TP_printk("done switch to id=%d from id=%d level=%x",
+ __entry->next->id, __entry->cur->id, __entry->level
)
);
#endif /* _ADRENO_TRACE_H */
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index b354ef2..0338c5fd 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -363,58 +363,19 @@
return ret;
}
-static int _lock_if_secure_mmu(struct kgsl_memdesc *memdesc,
- struct kgsl_mmu *mmu)
-{
- struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
- if (!kgsl_memdesc_is_secured(memdesc))
- return 0;
-
- if (!kgsl_mmu_is_secured(mmu))
- return -EINVAL;
-
- mutex_lock(&device->mutex);
- if (kgsl_active_count_get(device)) {
- mutex_unlock(&device->mutex);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void _unlock_if_secure_mmu(struct kgsl_memdesc *memdesc,
- struct kgsl_mmu *mmu)
-{
- struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
- if (!kgsl_memdesc_is_secured(memdesc) || !kgsl_mmu_is_secured(mmu))
- return;
-
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
-}
-
static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
- struct kgsl_memdesc *memdesc,
uint64_t gpuaddr, phys_addr_t physaddr,
uint64_t size, unsigned int flags)
{
struct kgsl_iommu_pt *iommu_pt = pt->priv;
int ret;
- ret = _lock_if_secure_mmu(memdesc, pt->mmu);
- if (ret)
- return ret;
-
_iommu_sync_mmu_pc(true);
ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
_iommu_sync_mmu_pc(false);
- _unlock_if_secure_mmu(memdesc, pt->mmu);
-
if (ret) {
KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
gpuaddr, size, flags, ret);
@@ -425,15 +386,10 @@
}
static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
- struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t size)
+ uint64_t addr, uint64_t size)
{
struct kgsl_iommu_pt *iommu_pt = pt->priv;
size_t unmapped = 0;
- int ret;
-
- ret = _lock_if_secure_mmu(memdesc, pt->mmu);
- if (ret)
- return ret;
_iommu_sync_mmu_pc(true);
@@ -441,8 +397,6 @@
_iommu_sync_mmu_pc(false);
- _unlock_if_secure_mmu(memdesc, pt->mmu);
-
if (unmapped != size) {
KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
addr, size, unmapped);
@@ -453,8 +407,7 @@
}
static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
- uint64_t addr, struct kgsl_memdesc *memdesc,
- struct scatterlist *sg, int nents,
+ uint64_t addr, struct scatterlist *sg, int nents,
uint64_t offset, uint64_t size, unsigned int flags)
{
struct kgsl_iommu_pt *iommu_pt = pt->priv;
@@ -466,10 +419,6 @@
phys_addr_t physaddr;
int ret;
- ret = _lock_if_secure_mmu(memdesc, pt->mmu);
- if (ret)
- return ret;
-
_iommu_sync_mmu_pc(true);
for_each_sg(sg, s, nents, i) {
@@ -509,11 +458,9 @@
_iommu_sync_mmu_pc(false);
- _unlock_if_secure_mmu(memdesc, pt->mmu);
-
if (size != 0) {
/* Cleanup on error */
- _iommu_unmap_sync_pc(pt, memdesc, addr, mapped);
+ _iommu_unmap_sync_pc(pt, addr, mapped);
KGSL_CORE_ERR(
"map sg offset err: 0x%016llX, %d, %x, %zd\n",
addr, nents, flags, mapped);
@@ -524,17 +471,11 @@
}
static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
- uint64_t addr, struct kgsl_memdesc *memdesc,
- struct scatterlist *sg, int nents,
+ uint64_t addr, struct scatterlist *sg, int nents,
unsigned int flags)
{
struct kgsl_iommu_pt *iommu_pt = pt->priv;
size_t mapped;
- int ret;
-
- ret = _lock_if_secure_mmu(memdesc, pt->mmu);
- if (ret)
- return ret;
_iommu_sync_mmu_pc(true);
@@ -542,8 +483,6 @@
_iommu_sync_mmu_pc(false);
- _unlock_if_secure_mmu(memdesc, pt->mmu);
-
if (mapped == 0) {
KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
addr, nents, flags, mapped);
@@ -1754,7 +1693,7 @@
if (addr == 0)
return -EINVAL;
- return _iommu_unmap_sync_pc(pt, memdesc, addr + offset, size);
+ return _iommu_unmap_sync_pc(pt, addr + offset, size);
}
static int
@@ -1819,7 +1758,7 @@
physaddr = page_to_phys(kgsl_guard_page);
}
- return _iommu_map_sync_pc(pt, memdesc, gpuaddr, physaddr,
+ return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
kgsl_memdesc_guard_page_size(memdesc),
protflags & ~IOMMU_WRITE);
}
@@ -1864,14 +1803,13 @@
if (IS_ERR(sgt))
return PTR_ERR(sgt);
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
- sgt->nents, flags);
+ ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
if (ret)
goto done;
ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
if (ret)
- _iommu_unmap_sync_pc(pt, memdesc, addr, size);
+ _iommu_unmap_sync_pc(pt, addr, size);
done:
if (memdesc->pages != NULL)
@@ -1910,8 +1848,7 @@
0, size, GFP_KERNEL);
if (ret == 0) {
ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
- memdesc, sgt.sgl, sgt.nents,
- IOMMU_READ | IOMMU_NOEXEC);
+ sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
sg_free_table(&sgt);
}
@@ -1964,7 +1901,7 @@
ret = sg_alloc_table_from_pages(&sgt, pages, count,
0, size, GFP_KERNEL);
if (ret == 0) {
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt.sgl,
+ ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
sgt.nents, map_flags);
sg_free_table(&sgt);
}
@@ -2013,7 +1950,7 @@
memdesc, physoffset, size, protflags);
else
ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, sgt->sgl, sgt->nents,
+ sgt->sgl, sgt->nents,
physoffset, size, protflags);
if (memdesc->pages != NULL)
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 1bfb98e..c7f6c6b 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -57,7 +57,7 @@
#define SLV_ADDR_SHFT (9)
#define I2C_PACK_EN (BIT(0) | BIT(1))
-#define I2C_CORE2X_VOTE (10000)
+#define I2C_CORE2X_VOTE (960)
#define GP_IRQ0 0
#define GP_IRQ1 1
#define GP_IRQ2 2
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index efca013..36777b3 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1214,4 +1214,14 @@
To compile this driver as a module, choose M here: the
module will be called bu21023_ts.
+config TOUCHSCREEN_SYNAPTICS_DSX
+ bool "Synaptics Touchscreen Driver"
+ depends on I2C
+ help
+ Say Y here if you have a Synaptics Touchscreen.
+
+ If unsure, say N.
+
+source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 81b8645..0caab59 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -71,6 +71,7 @@
obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI) += surface3_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/Kconfig b/drivers/input/touchscreen/synaptics_dsx/Kconfig
new file mode 100644
index 0000000..b2fa115
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Kconfig
@@ -0,0 +1,128 @@
+#
+# Synaptics DSX touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX
+ bool "Synaptics DSX touchscreen"
+ default y
+ help
+ Say Y here if you have a Synaptics DSX touchscreen connected
+ to your system.
+
+ If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX
+
+choice
+ default TOUCHSCREEN_SYNAPTICS_DSX_I2C
+ prompt "Synaptics DSX bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C
+ bool "RMI over I2C"
+ depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI
+ bool "RMI over SPI"
+ depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C
+ bool "HID over I2C"
+ depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ tristate "Synaptics DSX core driver module"
+ depends on I2C || SPI_MASTER
+ help
+ Say Y here to enable basic touch reporting functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV
+ tristate "Synaptics DSX RMI device module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for direct RMI register access.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+ tristate "Synaptics DSX firmware update module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for doing firmware update.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING
+ tristate "Synaptics DSX test reporting module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for retrieving production test reports.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY
+ tristate "Synaptics DSX proximity module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for proximity functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN
+ tristate "Synaptics DSX active pen module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for active pen functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE
+ tristate "Synaptics DSX user defined gesture module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for user defined gesture functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO
+ tristate "Synaptics DSX video module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for video communication functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_video.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_DEBUG
+ tristate "Synaptics DSX debug module"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+ help
+ Say Y here to enable support for firmware debug functionality.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_dsx_debug.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/Makefile b/drivers/input/touchscreen/synaptics_dsx/Makefile
new file mode 100644
index 0000000..191dcdc
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C) += synaptics_dsx_rmi_hid_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO) += synaptics_dsx_video.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_DEBUG) += synaptics_dsx_debug.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
new file mode 100644
index 0000000..3666e87
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
@@ -0,0 +1,607 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+/* ABS_PRESSURE range; the firmware reports pressure as either a 16-bit or
+ * an 8-bit quantity, detected at init time in apen_pressure().
+ */
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+/* F12 query register 8: size of query 9 followed by one presence bit per
+ * F12 data register (only the first presence byte is broken out here).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+/* Pen data layout when the firmware reports 8-bit pressure (no
+ * pressure_lsb byte); overlaid on struct apen_data in apen_report().
+ */
+struct apen_data_8b_pressure {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[11];
+	};
+};
+
+/* Pen data layout for 16-bit pressure; also the buffer actually read from
+ * the device (12 bytes covers both layouts).
+ */
+struct apen_data {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_lsb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[12];
+	};
+};
+
+/* Per-device state for the active pen expansion module. */
+struct synaptics_rmi4_apen_handle {
+	bool apen_present;
+	unsigned char intr_mask;
+	unsigned char battery_state;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short apen_data_addr;
+	unsigned short max_pressure;
+	unsigned int pen_id;
+	struct input_dev *apen_dev;
+	struct apen_data *apen_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Module-wide singleton handle; NULL until synaptics_rmi4_apen_init(). */
+static struct synaptics_rmi4_apen_handle *apen;
+
+/* Signalled by the remove callback; module exit blocks on it. */
+DECLARE_COMPLETION(apen_remove_complete);
+
+/* Release touch, pen, and eraser tool state and mark the pen absent. */
+static void apen_lift(void)
+{
+	input_report_key(apen->apen_dev, BTN_TOUCH, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_PEN, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_RUBBER, 0);
+	input_sync(apen->apen_dev);
+	apen->apen_present = false;
+}
+
+/*
+ * Read one active pen sample from the F12 data register and report it to
+ * the input subsystem.  Invoked from the attention callback whenever the
+ * pen's interrupt bit is asserted.
+ */
+static void apen_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int pressure;
+	static int invert = -1; /* invert bit of the previous sample; -1 = unknown */
+	struct apen_data_8b_pressure *apen_data_8b;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->apen_data_addr,
+			apen->apen_data->data,
+			sizeof(apen->apen_data->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read active pen data\n",
+				__func__);
+		return;
+	}
+
+	/* Pen out of proximity: emit a lift if one was previously down. */
+	if (apen->apen_data->status_pen == 0) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: No active pen data\n",
+				__func__);
+
+		return;
+	}
+
+	x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+	y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	/* NOTE(review): x and y are assembled from unsigned bytes and are
+	 * always in [0, 65535], so this comparison can never be true;
+	 * presumably the firmware's invalid-coordinate marker 0xFFFF was
+	 * intended here — confirm against the F12 spec.
+	 */
+	if ((x == -1) && (y == -1)) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Active pen in range but no valid x & y\n",
+				__func__);
+
+		return;
+	}
+
+	if (!apen->apen_present)
+		invert = -1;
+
+	/* Tip <-> eraser flip: lift first so the two tools never overlap. */
+	if (invert != -1 && invert != apen->apen_data->status_invert)
+		apen_lift();
+
+	invert = apen->apen_data->status_invert;
+
+	/* Decode pressure/battery/pen-id using whichever layout the
+	 * firmware reports (16-bit vs 8-bit pressure, set at init).
+	 */
+	if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+		pressure = (apen->apen_data->pressure_msb << 8) |
+				apen->apen_data->pressure_lsb;
+		apen->battery_state = apen->apen_data->battery_state;
+		apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+				(apen->apen_data->pen_id_16_23 << 16) |
+				(apen->apen_data->pen_id_8_15 << 8) |
+				apen->apen_data->pen_id_0_7;
+	} else {
+		apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+		pressure = apen_data_8b->pressure_msb;
+		apen->battery_state = apen_data_8b->battery_state;
+		apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+				(apen_data_8b->pen_id_16_23 << 16) |
+				(apen_data_8b->pen_id_8_15 << 8) |
+				apen_data_8b->pen_id_0_7;
+	}
+
+	input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+	input_report_key(apen->apen_dev,
+			apen->apen_data->status_invert > 0 ?
+			BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+	input_report_key(apen->apen_dev,
+			BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+			1 : 0);
+	input_report_abs(apen->apen_dev, ABS_X, x);
+	input_report_abs(apen->apen_dev, ABS_Y, y);
+	input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+	input_sync(apen->apen_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+			__func__,
+			apen->apen_data->status_pen,
+			apen->apen_data->status_invert,
+			apen->apen_data->status_barrel,
+			x, y, pressure);
+
+	apen->apen_present = true;
+}
+
+/* Set ABS axis ranges on the pen input device from the sensor's maximum
+ * coordinates and the detected pressure resolution.
+ */
+static void apen_set_params(void)
+{
+	input_set_abs_params(apen->apen_dev, ABS_X, 0,
+			apen->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_Y, 0,
+			apen->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_PRESSURE, 0,
+			apen->max_pressure, 0, 0);
+
+	return;
+}
+
+/*
+ * Determine the pen pressure resolution by parsing the F12 query 9 data
+ * register descriptors: walk past the descriptors of data registers 0-5,
+ * then inspect the pen data register's subpacket presence bits.  Subpackets
+ * 1 and 2 both present means 16-bit pressure, otherwise 8-bit.
+ *
+ * Returns 0 on success or a negative errno (-ENOMEM on allocation failure,
+ * or the error from the register read).
+ */
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_reg_presence;
+	unsigned char size_of_query_9;
+	unsigned char *query_9;
+	unsigned char *data_desc;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	data_reg_presence = query_8->data[1];
+
+	size_of_query_9 = query_8->size_of_query9;
+	query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+	if (!query_9) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 9\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 9,
+			query_9,
+			size_of_query_9);
+	if (retval < 0)
+		goto exit;
+
+	data_desc = query_9;
+
+	for (ii = 0; ii < 6; ii++) {
+		if (!(data_reg_presence & (1 << ii)))
+			continue; /* The data register is not present */
+		data_desc++; /* Jump over the size entry */
+		/* Subpacket bytes chain while bit 7 (continuation) is set */
+		while (*data_desc & (1 << 7))
+			data_desc++;
+		data_desc++; /* Go to the next descriptor */
+	}
+
+	data_desc++; /* Jump over the size entry */
+	/* Check for the presence of subpackets 1 and 2 */
+	if ((*data_desc & (3 << 1)) == (3 << 1))
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+	else
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+	kfree(query_9);
+
+	return retval;
+}
+
+/*
+ * Locate the pen data register (F12 data 6) and detect the pressure
+ * format.  The data register's offset is the count of present data
+ * registers preceding it, taken from the query 8 presence bits.
+ *
+ * Returns 0 on success, -ENODEV when the firmware has no pen support,
+ * or a negative error from the register reads.
+ */
+static int apen_reg_init(void)
+{
+	int retval;
+	unsigned char data_offset;
+	unsigned char size_of_query8;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	/* Query 7 holds the size of query 8 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Pen data lives in F12 data register 6; require its presence bit */
+	if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present +
+				query_8.data4_is_present +
+				query_8.data5_is_present;
+		apen->apen_data_addr = apen->data_base_addr + data_offset;
+		retval = apen_pressure(&query_8);
+		if (retval < 0)
+			return retval;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Active pen support unavailable\n",
+				__func__);
+		retval = -ENODEV;
+	}
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table to find F12, record its register base
+ * addresses, initialize the pen registers, and enable F12's interrupt
+ * sources in F01 control 1.
+ *
+ * Returns 0 on success, -EINVAL if F12 is not found, or a negative error
+ * from register access.
+ */
+static int apen_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0; /* interrupt sources seen before F12 */
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	/* PDT entries are scanned from the top of each page downwards */
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+				}
+			} else {
+				/* Empty entry ends this page's PDT */
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	/* Fold the page number into the 16-bit register addresses */
+	apen->query_base_addr = fd.query_base_addr | (page << 8);
+	apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	apen->data_base_addr = fd.data_base_addr | (page << 8);
+	apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = apen_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize active pen registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build the mask for F12's interrupt bits within the first
+	 * interrupt status byte, then enable them in F01 control 1.
+	 */
+	apen->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		apen->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Attention callback from the core: report pen data when one of this
+ * module's interrupt bits is set in the asserted interrupt mask.
+ */
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!apen)
+		return;
+
+	if (apen->intr_mask & intr_mask)
+		apen_report();
+
+	return;
+}
+
+/*
+ * Init callback from the core: allocate the module handle, locate F12 via
+ * the PDT, and register the active pen input device.  Idempotent — returns
+ * 0 immediately if the handle already exists.
+ *
+ * Returns 0 on success or a negative errno; on failure all partially
+ * acquired resources are released and the handle is reset to NULL.
+ */
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (apen) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+	if (!apen) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+	if (!apen->apen_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen;
+	}
+
+	apen->rmi4_data = rmi4_data;
+
+	retval = apen_scan_pdt();
+	if (retval < 0)
+		goto exit_free_apen_data;
+
+	apen->apen_dev = input_allocate_device();
+	if (apen->apen_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate active pen device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen_data;
+	}
+
+	apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+	apen->apen_dev->phys = APEN_PHYS_NAME;
+	apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(apen->apen_dev, rmi4_data);
+
+	set_bit(EV_KEY, apen->apen_dev->evbit);
+	set_bit(EV_ABS, apen->apen_dev->evbit);
+	set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+	set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+	apen_set_params();
+
+	retval = input_register_device(apen->apen_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register active pen device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	return 0;
+
+	/* Error unwinding: release resources in reverse acquisition order */
+exit_free_input_device:
+	input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+	kfree(apen->apen_data);
+
+exit_free_apen:
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	return retval;
+}
+
+/* Remove callback from the core: tear down the input device and handle,
+ * then signal module exit (which waits on apen_remove_complete).
+ */
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		goto exit;
+
+	input_unregister_device(apen->apen_dev);
+	kfree(apen->apen_data);
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	complete(&apen_remove_complete);
+}
+
+/* Reset callback: re-run initialization if the handle is gone, otherwise
+ * lift any active pen and rescan the PDT (register addresses may change
+ * after a firmware reset).
+ */
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen) {
+		synaptics_rmi4_apen_init(rmi4_data);
+		return;
+	}
+
+	apen_lift();
+
+	apen_scan_pdt();
+}
+
+/* Reinit callback: drop any in-flight pen contact. */
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+/* Early-suspend callback: release pen state before the panel blanks. */
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+/* Suspend callback: release pen state on system suspend. */
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+/* Expansion-function descriptor handed to the DSX core; the core invokes
+ * these callbacks on device lifecycle events and attention interrupts.
+ */
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+	.fn_type = RMI_ACTIVE_PEN,
+	.init = synaptics_rmi4_apen_init,
+	.remove = synaptics_rmi4_apen_remove,
+	.reset = synaptics_rmi4_apen_reset,
+	.reinit = synaptics_rmi4_apen_reinit,
+	.early_suspend = synaptics_rmi4_apen_e_suspend,
+	.suspend = synaptics_rmi4_apen_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_apen_attn,
+};
+
+/* Register this expansion module with the DSX core driver. */
+static int __init rmi4_active_pen_module_init(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, true);
+
+	return 0;
+}
+
+/* Unregister from the core and wait until the remove callback has run. */
+static void __exit rmi4_active_pen_module_exit(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, false);
+
+	wait_for_completion(&apen_remove_complete);
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
new file mode 100644
index 0000000..9ce3026
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -0,0 +1,4879 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#include <linux/msm_drm_notify.h>
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+/*
+#define USE_DATA_SERVER
+*/
+
+#define WAKEUP_GESTURE false
+
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#ifdef SYNA_TDDI
+#define TDDI_LPWG_WAIT_US 10
+#endif
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+ bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+ bool rebuild);
+#ifdef CONFIG_FB
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+ unsigned long event, void *data);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static int synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef USE_DATA_SERVER
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+#endif
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
+/* F01 device status register: status code plus flash-programming and
+ * unconfigured flags.
+ */
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 queries 0-5: capability flags, finger count, electrode counts. */
+struct synaptics_rmi4_f11_query_0_5 {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char f11_query0_b0__2:3;
+			unsigned char has_query_9:1;
+			unsigned char has_query_11:1;
+			unsigned char has_query_12:1;
+			unsigned char has_query_27:1;
+			unsigned char has_query_28:1;
+
+			/* query 1 */
+			unsigned char num_of_fingers:3;
+			unsigned char has_rel:1;
+			unsigned char has_abs:1;
+			unsigned char has_gestures:1;
+			unsigned char has_sensitibity_adjust:1;
+			unsigned char f11_query1_b7:1;
+
+			/* query 2 */
+			unsigned char num_of_x_electrodes;
+
+			/* query 3 */
+			unsigned char num_of_y_electrodes;
+
+			/* query 4 */
+			unsigned char max_electrodes:7;
+			unsigned char f11_query4_b7:1;
+
+			/* query 5 */
+			unsigned char abs_data_size:2;
+			unsigned char has_anchored_finger:1;
+			unsigned char has_adj_hyst:1;
+			unsigned char has_dribble:1;
+			unsigned char has_bending_correction:1;
+			unsigned char has_large_object_suppression:1;
+			unsigned char has_jitter_filter:1;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/* F11 queries 7-8: gesture capability flags. */
+struct synaptics_rmi4_f11_query_7_8 {
+	union {
+		struct {
+			/* query 7 */
+			unsigned char has_single_tap:1;
+			unsigned char has_tap_and_hold:1;
+			unsigned char has_double_tap:1;
+			unsigned char has_early_tap:1;
+			unsigned char has_flick:1;
+			unsigned char has_press:1;
+			unsigned char has_pinch:1;
+			unsigned char has_chiral_scroll:1;
+
+			/* query 8 */
+			unsigned char has_palm_detect:1;
+			unsigned char has_rotate:1;
+			unsigned char has_touch_shapes:1;
+			unsigned char has_scroll_zones:1;
+			unsigned char individual_scroll_zones:1;
+			unsigned char has_multi_finger_scroll:1;
+			unsigned char has_multi_finger_scroll_edge_motion:1;
+			unsigned char has_multi_finger_scroll_inertia:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/* F11 query 9: pen and proximity capability flags. */
+struct synaptics_rmi4_f11_query_9 {
+	union {
+		struct {
+			unsigned char has_pen:1;
+			unsigned char has_proximity:1;
+			unsigned char has_large_object_sensitivity:1;
+			unsigned char has_suppress_on_large_object_detect:1;
+			unsigned char has_two_pen_thresholds:1;
+			unsigned char has_contact_geometry:1;
+			unsigned char has_pen_hover_discrimination:1;
+			unsigned char has_pen_hover_and_edge_filters:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query 12: miscellaneous 2D sensing capability flags. */
+struct synaptics_rmi4_f11_query_12 {
+	union {
+		struct {
+			unsigned char has_small_object_detection:1;
+			unsigned char has_small_object_detection_tuning:1;
+			unsigned char has_8bit_w:1;
+			unsigned char has_2d_adjustable_mapping:1;
+			unsigned char has_general_information_2:1;
+			unsigned char has_physical_properties:1;
+			unsigned char has_finger_limit:1;
+			unsigned char has_linear_cofficient_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query 27: wakeup gesture and pen correction capability flags. */
+struct synaptics_rmi4_f11_query_27 {
+	union {
+		struct {
+			unsigned char f11_query27_b0:1;
+			unsigned char has_pen_position_correction:1;
+			unsigned char has_pen_jitter_filter_coefficient:1;
+			unsigned char has_group_decomposition:1;
+			unsigned char has_wakeup_gesture:1;
+			unsigned char has_small_finger_correction:1;
+			unsigned char has_data_37:1;
+			unsigned char f11_query27_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 controls 6-9: 12-bit maximum x/y sensor coordinates. */
+struct synaptics_rmi4_f11_ctrl_6_9 {
+	union {
+		struct {
+			unsigned char sensor_max_x_pos_7_0;
+			unsigned char sensor_max_x_pos_11_8:4;
+			unsigned char f11_ctrl7_b4__7:4;
+			unsigned char sensor_max_y_pos_7_0;
+			unsigned char sensor_max_y_pos_11_8:4;
+			unsigned char f11_ctrl9_b4__7:4;
+		} __packed;
+		unsigned char data[4];
+	};
+};
+
+/* F11 per-finger data: 12-bit x/y position, width, and z (pressure). */
+struct synaptics_rmi4_f11_data_1_5 {
+	union {
+		struct {
+			unsigned char x_position_11_4;
+			unsigned char y_position_11_4;
+			unsigned char x_position_3_0:4;
+			unsigned char y_position_3_0:4;
+			unsigned char wx:4;
+			unsigned char wy:4;
+			unsigned char z;
+		} __packed;
+		unsigned char data[5];
+	};
+};
+
+/* F12 query 5: size of query 6 followed by presence bits for control
+ * registers 0-63 (one bit per register).
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl48_is_present:1;
+				unsigned char ctrl49_is_present:1;
+				unsigned char ctrl50_is_present:1;
+				unsigned char ctrl51_is_present:1;
+				unsigned char ctrl52_is_present:1;
+				unsigned char ctrl53_is_present:1;
+				unsigned char ctrl54_is_present:1;
+				unsigned char ctrl55_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl56_is_present:1;
+				unsigned char ctrl57_is_present:1;
+				unsigned char ctrl58_is_present:1;
+				unsigned char ctrl59_is_present:1;
+				unsigned char ctrl60_is_present:1;
+				unsigned char ctrl61_is_present:1;
+				unsigned char ctrl62_is_present:1;
+				unsigned char ctrl63_is_present:1;
+			} __packed;
+		};
+		unsigned char data[9];
+	};
+};
+
+/* F12 query 8: size of query 9 followed by presence bits for data
+ * registers 0-31.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data24_is_present:1;
+				unsigned char data25_is_present:1;
+				unsigned char data26_is_present:1;
+				unsigned char data27_is_present:1;
+				unsigned char data28_is_present:1;
+				unsigned char data29_is_present:1;
+				unsigned char data30_is_present:1;
+				unsigned char data31_is_present:1;
+			} __packed;
+		};
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_8 {
+ union {
+ struct {
+ unsigned char max_x_coord_lsb;
+ unsigned char max_x_coord_msb;
+ unsigned char max_y_coord_lsb;
+ unsigned char max_y_coord_msb;
+ unsigned char rx_pitch_lsb;
+ unsigned char rx_pitch_msb;
+ unsigned char tx_pitch_lsb;
+ unsigned char tx_pitch_msb;
+ unsigned char low_rx_clip;
+ unsigned char high_rx_clip;
+ unsigned char low_tx_clip;
+ unsigned char high_tx_clip;
+ unsigned char num_of_rx;
+ unsigned char num_of_tx;
+ };
+ unsigned char data[14];
+ };
+};
+
+struct synaptics_rmi4_f12_ctrl_23 {
+ union {
+ struct {
+ unsigned char finger_enable:1;
+ unsigned char active_stylus_enable:1;
+ unsigned char palm_enable:1;
+ unsigned char unclassified_object_enable:1;
+ unsigned char hovering_finger_enable:1;
+ unsigned char gloved_finger_enable:1;
+ unsigned char f12_ctr23_00_b6__7:2;
+ unsigned char max_reported_objects;
+ unsigned char f12_ctr23_02_b0:1;
+ unsigned char report_active_stylus_as_finger:1;
+ unsigned char report_palm_as_finger:1;
+ unsigned char report_unclassified_object_as_finger:1;
+ unsigned char report_hovering_finger_as_finger:1;
+ unsigned char report_gloved_finger_as_finger:1;
+ unsigned char report_narrow_object_swipe_as_finger:1;
+ unsigned char report_handedge_as_finger:1;
+ unsigned char cover_enable:1;
+ unsigned char stylus_enable:1;
+ unsigned char eraser_enable:1;
+ unsigned char small_object_enable:1;
+ unsigned char f12_ctr23_03_b4__7:4;
+ unsigned char report_cover_as_finger:1;
+ unsigned char report_stylus_as_finger:1;
+ unsigned char report_eraser_as_finger:1;
+ unsigned char report_small_object_as_finger:1;
+ unsigned char f12_ctr23_04_b4__7:4;
+ };
+ unsigned char data[5];
+ };
+};
+
+/*
+ * F12 control register 31: geometry variant used by wedge sensors —
+ * maximum coordinates, RX pitch, clip values and P/Q electrode counts.
+ * Overlays the raw 12-byte register image in @data.
+ */
+struct synaptics_rmi4_f12_ctrl_31 {
+ union {
+ struct {
+ unsigned char max_x_coord_lsb;
+ unsigned char max_x_coord_msb;
+ unsigned char max_y_coord_lsb;
+ unsigned char max_y_coord_msb;
+ unsigned char rx_pitch_lsb;
+ unsigned char rx_pitch_msb;
+ unsigned char rx_clip_low;
+ unsigned char rx_clip_high;
+ unsigned char wedge_clip_low;
+ unsigned char wedge_clip_high;
+ unsigned char num_of_p;
+ unsigned char num_of_q;
+ };
+ unsigned char data[12];
+ };
+};
+
+/*
+ * F12 control register 58: force/pressure reporting configuration —
+ * reporting format, min/max force range and light/hard press
+ * thresholds with hysteresis.  Overlays the raw 14-byte image in @data.
+ */
+struct synaptics_rmi4_f12_ctrl_58 {
+ union {
+ struct {
+ unsigned char reporting_format;
+ unsigned char f12_ctr58_00_reserved;
+ unsigned char min_force_lsb;
+ unsigned char min_force_msb;
+ unsigned char max_force_lsb;
+ unsigned char max_force_msb;
+ unsigned char light_press_threshold_lsb;
+ unsigned char light_press_threshold_msb;
+ unsigned char light_press_hysteresis_lsb;
+ unsigned char light_press_hysteresis_msb;
+ unsigned char hard_press_threshold_lsb;
+ unsigned char hard_press_threshold_msb;
+ unsigned char hard_press_hysteresis_lsb;
+ unsigned char hard_press_hysteresis_msb;
+ };
+ unsigned char data[14];
+ };
+};
+
+/*
+ * Per-object F12 data-1 record as read from the device: object type/status
+ * byte followed by 12-bit X/Y split over LSB/MSB bytes.  The optional Z and
+ * WX/WY fields are only present when the corresponding reporting features
+ * are compiled in, so sizeof() of this struct must match the firmware's
+ * record size for the chosen configuration.
+ */
+struct synaptics_rmi4_f12_finger_data {
+ unsigned char object_type_and_status;
+ unsigned char x_lsb;
+ unsigned char x_msb;
+ unsigned char y_lsb;
+ unsigned char y_msb;
+#ifdef REPORT_2D_Z
+ unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+ unsigned char wx;
+ unsigned char wy;
+#endif
+};
+
+/*
+ * F1A (0D capacitive buttons) query registers 0-1: maximum button count
+ * and capability flags.  Overlays the raw 2-byte image in @data.
+ */
+struct synaptics_rmi4_f1a_query {
+ union {
+ struct {
+ unsigned char max_button_count:3;
+ unsigned char f1a_query0_b3__4:2;
+ unsigned char has_query4:1;
+ unsigned char has_query3:1;
+ unsigned char has_query2:1;
+ unsigned char has_general_control:1;
+ unsigned char has_interrupt_enable:1;
+ unsigned char has_multibutton_select:1;
+ unsigned char has_tx_rx_map:1;
+ unsigned char has_perbutton_threshold:1;
+ unsigned char has_release_threshold:1;
+ unsigned char has_strongestbtn_hysteresis:1;
+ unsigned char has_filter_strength:1;
+ } __packed;
+ unsigned char data[2];
+ };
+};
+
+/*
+ * F1A query register 4: presence flags for control registers 19 and 24.
+ * Overlays the raw 1-byte image in @data.
+ */
+struct synaptics_rmi4_f1a_query_4 {
+ union {
+ struct {
+ unsigned char has_ctrl19:1;
+ unsigned char f1a_query4_b1__4:4;
+ unsigned char has_ctrl24:1;
+ unsigned char f1a_query4_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+/*
+ * F1A control register 0: multi-button reporting mode and filter mode.
+ * Overlays the raw 1-byte image in @data.
+ */
+struct synaptics_rmi4_f1a_control_0 {
+ union {
+ struct {
+ unsigned char multibutton_report:2;
+ unsigned char filter_mode:2;
+ unsigned char reserved:4;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+/*
+ * Aggregated F1A control settings.  @txrx_map and @button_threshold are
+ * heap-allocated elsewhere in the driver (size depends on the query
+ * results) — ownership stays with the F1A handler that allocated them.
+ */
+struct synaptics_rmi4_f1a_control {
+ struct synaptics_rmi4_f1a_control_0 general_control;
+ unsigned char button_int_enable;
+ unsigned char multi_button;
+ unsigned char *txrx_map;
+ unsigned char *button_threshold;
+ unsigned char button_release_threshold;
+ unsigned char strongest_button_hysteresis;
+ unsigned char filter_strength;
+};
+
+/*
+ * Per-device F1A state: button counts, the raw data buffer read on each
+ * interrupt (@button_data_buffer, @button_bitmask_size bytes), the
+ * button-to-keycode map and the cached query/control images.
+ */
+struct synaptics_rmi4_f1a_handle {
+ int button_bitmask_size;
+ unsigned char max_count;
+ unsigned char valid_button_count;
+ unsigned char *button_data_buffer;
+ unsigned char *button_map;
+ struct synaptics_rmi4_f1a_query button_query;
+ struct synaptics_rmi4_f1a_control button_control;
+};
+
+/*
+ * One registered expandable-function module.  @insert/@remove flag
+ * pending init/removal work to be performed by the exp_data workqueue.
+ */
+struct synaptics_rmi4_exp_fhandler {
+ struct synaptics_rmi4_exp_fn *exp_fn;
+ bool insert;
+ bool remove;
+ struct list_head link;
+};
+
+/*
+ * Container for all expandable-function handlers: the handler list,
+ * the mutex guarding it, and the delayed-work/workqueue pair that
+ * processes pending insertions and removals.
+ */
+struct synaptics_rmi4_exp_fn_data {
+ bool initialized;
+ bool queue_work;
+ struct mutex mutex;
+ struct list_head list;
+ struct delayed_work work;
+ struct workqueue_struct *workqueue;
+ struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Shared state for all expandable-function modules (list + workqueue). */
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+/* Virtual key layout; presumably populated during probe — may be NULL. */
+static struct synaptics_dsx_button_map *vir_button_map;
+
+#ifdef USE_DATA_SERVER
+/* PID/task of the userspace data server signalled on F21 interrupts. */
+static pid_t synad_pid;
+static struct task_struct *synad_task;
+static struct siginfo interrupt_signal;
+#endif
+
+/*
+ * sysfs device attributes.  Write-only entries (0220) pair the store
+ * handler with synaptics_rmi4_show_error; read-only entries (0444) pair
+ * the show handler with synaptics_rmi4_store_error.
+ */
+static struct device_attribute attrs[] = {
+ __ATTR(reset, 0220,
+ synaptics_rmi4_show_error,
+ synaptics_rmi4_f01_reset_store),
+ __ATTR(productinfo, 0444,
+ synaptics_rmi4_f01_productinfo_show,
+ synaptics_rmi4_store_error),
+ __ATTR(buildid, 0444,
+ synaptics_rmi4_f01_buildid_show,
+ synaptics_rmi4_store_error),
+ __ATTR(flashprog, 0444,
+ synaptics_rmi4_f01_flashprog_show,
+ synaptics_rmi4_store_error),
+ __ATTR(0dbutton, 0664,
+ synaptics_rmi4_0dbutton_show,
+ synaptics_rmi4_0dbutton_store),
+ __ATTR(suspend, 0220,
+ synaptics_rmi4_show_error,
+ synaptics_rmi4_suspend_store),
+ __ATTR(wake_gesture, 0664,
+ synaptics_rmi4_wake_gesture_show,
+ synaptics_rmi4_wake_gesture_store),
+#ifdef USE_DATA_SERVER
+ __ATTR(synad_pid, 0220,
+ synaptics_rmi4_show_error,
+ synaptics_rmi4_synad_pid_store),
+#endif
+};
+
+/* Read-only sysfs entry exposing the virtual key map to userspace. */
+static struct kobj_attribute virtual_key_map_attr = {
+ .attr = {
+ .name = VIRTUAL_KEY_MAP_FILE_NAME,
+ .mode = 0444,
+ },
+ .show = synaptics_rmi4_virtual_key_map_show,
+};
+
+/*
+ * sysfs store handler: writing "1" issues a (non-rebuild) device reset.
+ * Any other value, or unparseable input, is rejected with -EINVAL.
+ */
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int reset;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ /*
+  * kstrtouint() returns 0 on success and a negative errno on
+  * failure; it never returns 1.  The previous "!= 1" comparison
+  * therefore rejected every input, valid or not.
+  */
+ if (kstrtouint(buf, 10, &reset))
+ return -EINVAL;
+
+ if (reset != 1)
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reset_device(rmi4_data, false);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to issue reset command, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return count;
+}
+
+/*
+ * sysfs show handler: print the two F01 product-info bytes as hex,
+ * e.g. "0x12 0x34".
+ */
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ unsigned char *product_info = rmi4_data->rmi4_mod_info.product_info;
+
+ return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+ product_info[0], product_info[1]);
+}
+
+/* sysfs show handler: print the cached firmware build ID in decimal. */
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->firmware_id);
+}
+
+/*
+ * sysfs show handler: read the F01 device status register and report
+ * whether the controller is in flash-programming (bootloader) mode.
+ * Returns the snprintf length, or a negative errno if the register
+ * read fails.
+ */
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ struct synaptics_rmi4_f01_device_status device_status;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ device_status.data,
+ sizeof(device_status.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read device status, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ device_status.flash_prog);
+}
+
+/* sysfs show handler: report whether 0D buttons are enabled (0/1). */
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->button_0d_enabled);
+}
+
+/*
+ * sysfs store handler: enable (non-zero) or disable (0) the 0D capacitive
+ * buttons by toggling the F1A bit in the F01 interrupt enable register.
+ */
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ unsigned char ii;
+ unsigned char intr_enable;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ /*
+  * kstrtouint() returns 0 on success, never 1; the previous "!= 1"
+  * comparison rejected every input.
+  */
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ input = input > 0 ? 1 : 0;
+
+ if (rmi4_data->button_0d_enabled == input)
+ return count;
+
+ if (list_empty(&rmi->support_fn_list))
+ return -ENODEV;
+
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+ ii = fhandler->intr_reg_num;
+
+ /* Read-modify-write the interrupt enable register. */
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr + 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+
+ if (input == 1)
+ intr_enable |= fhandler->intr_mask;
+ else
+ intr_enable &= ~fhandler->intr_mask;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr + 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+ }
+ }
+
+ rmi4_data->button_0d_enabled = input;
+
+ return count;
+}
+
+/*
+ * sysfs store handler: "1" suspends the device, "0" resumes it; any
+ * other value is rejected with -EINVAL.
+ */
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ /*
+  * kstrtouint() returns 0 on success, never 1; the previous "!= 1"
+  * comparison rejected every input.
+  */
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ if (input == 1)
+ synaptics_rmi4_suspend(dev);
+ else if (input == 0)
+ synaptics_rmi4_resume(dev);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+/* sysfs show handler: report whether wakeup gestures are enabled (0/1). */
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->enable_wakeup_gesture);
+}
+
+/*
+ * sysfs store handler: enable/disable wakeup gestures.  The setting is
+ * only applied if the firmware supports gestures via F11 or F12;
+ * otherwise the write is silently accepted without effect.
+ */
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ /*
+  * kstrtouint() returns 0 on success, never 1; the previous "!= 1"
+  * comparison rejected every input.
+  */
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ input = input > 0 ? 1 : 0;
+
+ if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+ rmi4_data->enable_wakeup_gesture = input;
+
+ return count;
+}
+
+#ifdef USE_DATA_SERVER
+/*
+ * sysfs store handler: register the PID of the userspace data server to
+ * be signalled on F21 interrupts.  A non-zero PID must resolve to a
+ * live task; writing 0 unregisters the server.
+ */
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ /*
+  * kstrtouint() returns 0 on success, never 1; the previous "!= 1"
+  * comparison rejected every input.
+  */
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ synad_pid = input;
+
+ if (synad_pid) {
+ synad_task = pid_task(find_vpid(synad_pid), PIDTYPE_PID);
+ if (!synad_task)
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif
+
+/*
+ * sysfs show handler for the virtual key map.  Emits one line per key
+ * in the Android virtualkeys format "0x01:code:x:y:w:h", consuming
+ * five values per button from vir_button_map->map.
+ *
+ * NOTE(review): vir_button_map is dereferenced unconditionally —
+ * presumably this attribute is only created when the map was parsed;
+ * confirm against the registration path.
+ */
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ii;
+ int cnt;
+ int count = 0;
+
+ for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+ cnt = snprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+ vir_button_map->map[ii * 5 + 0],
+ vir_button_map->map[ii * 5 + 1],
+ vir_button_map->map[ii * 5 + 2],
+ vir_button_map->map[ii * 5 + 3],
+ vir_button_map->map[ii * 5 + 4]);
+ buf += cnt;
+ count += cnt;
+ }
+
+ return count;
+}
+
+/*
+ * Switch F11 reporting between wakeup-gesture mode and continuous mode
+ * by rewriting the low 3 bits of the F11 control base register.
+ * Returns 0 on success or a negative errno from the register access.
+ *
+ * NOTE(review): the list walk assumes an F11 handler is present —
+ * callers gate on rmi4_data->f11_wakeup_gesture; if no F11 entry
+ * existed, fhandler would be invalid after the loop.  Confirm the
+ * guarantee holds for all callers.
+ */
+static int synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ int retval;
+ unsigned char reporting_control;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F11)
+ break;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.ctrl_base,
+ &reporting_control,
+ sizeof(reporting_control));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to change reporting mode\n",
+ __func__);
+ return retval;
+ }
+
+ /* Clear the 3-bit reporting-mode field, then set the new mode. */
+ reporting_control = (reporting_control & ~MASK_3BIT);
+ if (enable)
+ reporting_control |= F11_WAKEUP_GESTURE_MODE;
+ else
+ reporting_control |= F11_CONTINUOUS_MODE;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ fhandler->full_addr.ctrl_base,
+ &reporting_control,
+ sizeof(reporting_control));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to change reporting mode\n",
+ __func__);
+ return retval;
+ }
+
+ return retval;
+}
+
+/*
+ * Switch F12 reporting between wakeup-gesture mode and continuous mode
+ * via the F12 ctrl 20 register block (read-modify-write of the byte at
+ * index rmi4_data->set_wakeup_gesture).  Returns 0 on success or a
+ * negative errno from the register access.
+ *
+ * NOTE(review): like the F11 variant, the list walk assumes an F12
+ * handler exists — callers gate on rmi4_data->f12_wakeup_gesture.
+ */
+static int synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ int retval;
+ unsigned char offset;
+ unsigned char reporting_control[3];
+ struct synaptics_rmi4_f12_extra_data *extra_data;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F12)
+ break;
+ }
+
+ extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+ offset = extra_data->ctrl20_offset;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.ctrl_base + offset,
+ reporting_control,
+ sizeof(reporting_control));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to change reporting mode\n",
+ __func__);
+ return retval;
+ }
+
+ if (enable)
+ reporting_control[rmi4_data->set_wakeup_gesture] = F12_WAKEUP_GESTURE_MODE;
+ else
+ reporting_control[rmi4_data->set_wakeup_gesture] = F12_CONTINUOUS_MODE;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ fhandler->full_addr.ctrl_base + offset,
+ reporting_control,
+ sizeof(reporting_control));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to change reporting mode\n",
+ __func__);
+ return retval;
+ }
+
+ return retval;
+}
+
+/*
+ * Dispatch a wakeup-gesture mode change to whichever touch function the
+ * firmware provides (F11 takes precedence over F12); no-op if neither
+ * supports gestures.
+ */
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ if (rmi4_data->f11_wakeup_gesture) {
+ synaptics_rmi4_f11_wg(rmi4_data, enable);
+ return;
+ }
+
+ if (rmi4_data->f12_wakeup_gesture)
+ synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+/*
+ * Service an F11 (2D touch) interrupt: read the finger status registers,
+ * then read and report position (and optionally width) data for every
+ * active finger through the input subsystem.  While suspended with
+ * wakeup gestures enabled, it instead checks the gesture detection
+ * register and emits KEY_WAKEUP.  Returns the number of active touch
+ * points reported (0 on read errors).
+ */
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0; /* number of touch points */
+ unsigned char reg_index;
+ unsigned char finger;
+ unsigned char fingers_supported;
+ unsigned char num_of_finger_status_regs;
+ unsigned char finger_shift;
+ unsigned char finger_status;
+ unsigned char finger_status_reg[3];
+ unsigned char detected_gestures;
+ unsigned short data_addr;
+ unsigned short data_offset;
+ int x;
+ int y;
+ int wx;
+ int wy;
+ int temp;
+ struct synaptics_rmi4_f11_data_1_5 data;
+ struct synaptics_rmi4_f11_extra_data *extra_data;
+
+ /*
+ * The number of finger status registers is determined by the
+ * maximum number of fingers supported - 2 bits per finger. So
+ * the number of finger status registers to read is:
+ * register_count = ceil(max_num_of_fingers / 4)
+ */
+ fingers_supported = fhandler->num_of_data_points;
+ num_of_finger_status_regs = (fingers_supported + 3) / 4;
+ data_addr = fhandler->full_addr.data_base;
+
+ extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+ /* Suspended gesture path: report KEY_WAKEUP and skip touch handling. */
+ if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data38_offset,
+ &detected_gestures,
+ sizeof(detected_gestures));
+ if (retval < 0)
+ return 0;
+
+ if (detected_gestures) {
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+ input_sync(rmi4_data->input_dev);
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+ input_sync(rmi4_data->input_dev);
+ rmi4_data->suspend = false;
+ }
+/* synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+ return 0;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr,
+ finger_status_reg,
+ num_of_finger_status_regs);
+ if (retval < 0)
+ return 0;
+
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+ for (finger = 0; finger < fingers_supported; finger++) {
+ reg_index = finger / 4;
+ finger_shift = (finger % 4) * 2;
+ finger_status = (finger_status_reg[reg_index] >> finger_shift)
+ & MASK_2BIT;
+
+ /*
+ * Each 2-bit finger status field represents the following:
+ * 00 = finger not present
+ * 01 = finger present and data accurate
+ * 10 = finger present but data may be inaccurate
+ * 11 = reserved
+ */
+#ifdef TYPE_B_PROTOCOL
+ input_mt_slot(rmi4_data->input_dev, finger);
+ input_mt_report_slot_state(rmi4_data->input_dev,
+ MT_TOOL_FINGER, finger_status);
+#endif
+
+ if (finger_status) {
+ /* Per-finger data records follow the status registers. */
+ data_offset = data_addr +
+ num_of_finger_status_regs +
+ (finger * sizeof(data.data));
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_offset,
+ data.data,
+ sizeof(data.data));
+ if (retval < 0) {
+ touch_count = 0;
+ goto exit;
+ }
+
+ /* 12-bit coordinates split across two registers each. */
+ x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+ y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+ wx = data.wx;
+ wy = data.wy;
+
+ /* Apply board-specific axis orientation corrections. */
+ if (rmi4_data->hw_if->board_data->swap_axes) {
+ temp = x;
+ x = y;
+ y = temp;
+ temp = wx;
+ wx = wy;
+ wy = temp;
+ }
+
+ if (rmi4_data->hw_if->board_data->x_flip)
+ x = rmi4_data->sensor_max_x - x;
+ if (rmi4_data->hw_if->board_data->y_flip)
+ y = rmi4_data->sensor_max_y - y;
+
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 1);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 1);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR, max(wx, wy));
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+ __func__, finger,
+ finger_status,
+ x, y, wx, wy);
+
+ touch_count++;
+ }
+ }
+
+ /* No active fingers: release the touch buttons. */
+ if (touch_count == 0) {
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 0);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+ }
+
+ input_sync(rmi4_data->input_dev);
+
+exit:
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+ return touch_count;
+}
+
+/*
+ * Service an F12 (2D touch) interrupt: determine the number of active
+ * objects (via data15 when available), read their data records and
+ * report fingers, palms, stylus and eraser events through the input
+ * subsystem.  While suspended with wakeup gestures enabled, it instead
+ * reads the gesture registers and emits KEY_WAKEUP.  Returns the number
+ * of reported touch points (0 on read errors).
+ *
+ * Fix: the F51_DISCRETE_FORCE error path previously returned while
+ * still holding rmi4_report_mutex, deadlocking all later reports; it
+ * now unlocks before returning.
+ */
+static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0; /* number of touch points */
+ unsigned char index;
+ unsigned char finger;
+ unsigned char fingers_to_process;
+ unsigned char finger_status;
+ unsigned char size_of_2d_data;
+ unsigned char gesture_type;
+ unsigned short data_addr;
+ int x;
+ int y;
+ int wx;
+ int wy;
+ int temp;
+#if defined(REPORT_2D_PRESSURE) || defined(F51_DISCRETE_FORCE)
+ int pressure;
+#endif
+#ifdef REPORT_2D_PRESSURE
+ unsigned char f_fingers;
+ unsigned char f_lsb;
+ unsigned char f_msb;
+ unsigned char *f_data;
+#endif
+#ifdef F51_DISCRETE_FORCE
+ unsigned char force_level;
+#endif
+ struct synaptics_rmi4_f12_extra_data *extra_data;
+ struct synaptics_rmi4_f12_finger_data *data;
+ struct synaptics_rmi4_f12_finger_data *finger_data;
+ static unsigned char finger_presence;
+ static unsigned char stylus_presence;
+#ifdef F12_DATA_15_WORKAROUND
+ static unsigned char objects_already_present;
+#endif
+
+ fingers_to_process = fhandler->num_of_data_points;
+ data_addr = fhandler->full_addr.data_base;
+ extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+ size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+ /* Suspended gesture path: report KEY_WAKEUP and skip touch handling. */
+ if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data4_offset,
+ rmi4_data->gesture_detection,
+ sizeof(rmi4_data->gesture_detection));
+ if (retval < 0)
+ return 0;
+
+ gesture_type = rmi4_data->gesture_detection[0];
+
+ if (gesture_type && gesture_type != F12_UDG_DETECT) {
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+ input_sync(rmi4_data->input_dev);
+ input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+ input_sync(rmi4_data->input_dev);
+ /* synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+ /* rmi4_data->suspend = false; */
+ }
+
+ return 0;
+ }
+
+ /* Determine the total number of fingers to process */
+ if (extra_data->data15_size) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data15_offset,
+ extra_data->data15_data,
+ extra_data->data15_size);
+ if (retval < 0)
+ return 0;
+
+ /* Start checking from the highest bit */
+ index = extra_data->data15_size - 1; /* Highest byte */
+ finger = (fingers_to_process - 1) % 8; /* Highest bit */
+ do {
+ if (extra_data->data15_data[index] & (1 << finger))
+ break;
+
+ if (finger) {
+ finger--;
+ } else if (index > 0) {
+ index--; /* Move to the next lower byte */
+ finger = 7;
+ }
+
+ fingers_to_process--;
+ } while (fingers_to_process);
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Number of fingers to process = %d\n",
+ __func__, fingers_to_process);
+ }
+
+#ifdef F12_DATA_15_WORKAROUND
+ fingers_to_process = max(fingers_to_process, objects_already_present);
+#endif
+
+ if (!fingers_to_process) {
+ synaptics_rmi4_free_fingers(rmi4_data);
+ finger_presence = 0;
+ stylus_presence = 0;
+ return 0;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data1_offset,
+ (unsigned char *)fhandler->data,
+ fingers_to_process * size_of_2d_data);
+ if (retval < 0)
+ return 0;
+
+ data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
+
+#ifdef REPORT_2D_PRESSURE
+ if (rmi4_data->report_pressure) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr + extra_data->data29_offset,
+ extra_data->data29_data,
+ extra_data->data29_size);
+ if (retval < 0)
+ return 0;
+ }
+#endif
+
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+ for (finger = 0; finger < fingers_to_process; finger++) {
+ finger_data = data + finger;
+ finger_status = finger_data->object_type_and_status;
+
+#ifdef F12_DATA_15_WORKAROUND
+ objects_already_present = finger + 1;
+#endif
+
+ x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+ y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+#ifdef REPORT_2D_W
+ wx = finger_data->wx;
+ wy = finger_data->wy;
+#endif
+
+ /*
+  * NOTE(review): when REPORT_2D_W is not defined, wx/wy are
+  * never assigned before the swap/debug uses below — confirm
+  * the driver is always built with REPORT_2D_W.
+  */
+ if (rmi4_data->hw_if->board_data->swap_axes) {
+ temp = x;
+ x = y;
+ y = temp;
+ temp = wx;
+ wx = wy;
+ wy = temp;
+ }
+
+ if (rmi4_data->hw_if->board_data->x_flip)
+ x = rmi4_data->sensor_max_x - x;
+ if (rmi4_data->hw_if->board_data->y_flip)
+ y = rmi4_data->sensor_max_y - y;
+
+ switch (finger_status) {
+ case F12_FINGER_STATUS:
+ case F12_GLOVED_FINGER_STATUS:
+ /* Stylus has priority over fingers */
+ if (stylus_presence)
+ break;
+#ifdef TYPE_B_PROTOCOL
+ input_mt_slot(rmi4_data->input_dev, finger);
+ input_mt_report_slot_state(rmi4_data->input_dev,
+ MT_TOOL_FINGER, 1);
+#endif
+
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 1);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 1);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+ if (rmi4_data->wedge_sensor) {
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR, wx);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MINOR, wx);
+ } else {
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR,
+ max(wx, wy));
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MINOR,
+ min(wx, wy));
+ }
+#endif
+#ifdef REPORT_2D_PRESSURE
+ if (rmi4_data->report_pressure) {
+ /* Two bytes (LSB/MSB) per finger in data29. */
+ f_fingers = extra_data->data29_size / 2;
+ f_data = extra_data->data29_data;
+ if (finger + 1 > f_fingers) {
+ pressure = 1;
+ } else {
+ f_lsb = finger * 2;
+ f_msb = finger * 2 + 1;
+ pressure = (int)f_data[f_lsb] << 0 |
+ (int)f_data[f_msb] << 8;
+ }
+ pressure = pressure > 0 ? pressure : 1;
+ if (pressure > rmi4_data->force_max)
+ pressure = rmi4_data->force_max;
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_PRESSURE, pressure);
+ }
+#elif defined(F51_DISCRETE_FORCE)
+ if (finger == 0) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ FORCE_LEVEL_ADDR,
+ &force_level,
+ sizeof(force_level));
+ if (retval < 0) {
+ /*
+  * Must drop the report mutex before
+  * bailing out; returning with it held
+  * deadlocked every later report.
+  */
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+ return 0;
+ }
+ pressure = force_level > 0 ? force_level : 1;
+ } else {
+ pressure = 1;
+ }
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_PRESSURE, pressure);
+#endif
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+ __func__, finger,
+ finger_status,
+ x, y, wx, wy);
+
+ finger_presence = 1;
+ touch_count++;
+ break;
+ case F12_PALM_STATUS:
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
+ __func__, finger,
+ x, y, wx, wy);
+ break;
+ case F12_STYLUS_STATUS:
+ case F12_ERASER_STATUS:
+ if (!rmi4_data->stylus_enable)
+ break;
+ /* Stylus has priority over fingers */
+ if (finger_presence) {
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+ synaptics_rmi4_free_fingers(rmi4_data);
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+ finger_presence = 0;
+ }
+ if (stylus_presence) {/* Allow one stylus at a time */
+ if (finger + 1 != stylus_presence)
+ break;
+ }
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOUCH, 1);
+ if (finger_status == F12_STYLUS_STATUS) {
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOOL_PEN, 1);
+ } else {
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOOL_RUBBER, 1);
+ }
+ input_report_abs(rmi4_data->stylus_dev,
+ ABS_X, x);
+ input_report_abs(rmi4_data->stylus_dev,
+ ABS_Y, y);
+ input_sync(rmi4_data->stylus_dev);
+
+ stylus_presence = finger + 1;
+ touch_count++;
+ break;
+ default:
+#ifdef TYPE_B_PROTOCOL
+ input_mt_slot(rmi4_data->input_dev, finger);
+ input_mt_report_slot_state(rmi4_data->input_dev,
+ MT_TOOL_FINGER, 0);
+#endif
+ break;
+ }
+ }
+
+ /* No active objects: release all buttons and tools. */
+ if (touch_count == 0) {
+ finger_presence = 0;
+#ifdef F12_DATA_15_WORKAROUND
+ objects_already_present = 0;
+#endif
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 0);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+
+ if (rmi4_data->stylus_enable) {
+ stylus_presence = 0;
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOUCH, 0);
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOOL_PEN, 0);
+ if (rmi4_data->eraser_enable) {
+ input_report_key(rmi4_data->stylus_dev,
+ BTN_TOOL_RUBBER, 0);
+ }
+ input_sync(rmi4_data->stylus_dev);
+ }
+ }
+
+ input_sync(rmi4_data->input_dev);
+
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+ return touch_count;
+}
+
+/*
+ * Service an F1A (0D button) interrupt: read the button bitmask and
+ * report key transitions.  With NO_0D_WHILE_2D, button events that
+ * occur while fingers are on the 2D surface are deferred/suppressed.
+ *
+ * NOTE(review): returns the status of the last register read rather
+ * than a touch count — the caller ignores the return value, but the
+ * asymmetry with the F11/F12 reporters is worth confirming.
+ */
+static int synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0;
+ unsigned char button;
+ unsigned char index;
+ unsigned char shift;
+ unsigned char status;
+ unsigned char *data;
+ unsigned short data_addr = fhandler->full_addr.data_base;
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+ static unsigned char do_once = 1;
+ static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+ static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+ static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+ /* One-time initialization of the per-button state trackers. */
+ if (do_once) {
+ memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+ memset(before_2d_status, 0, sizeof(before_2d_status));
+ memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+ do_once = 0;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ data_addr,
+ f1a->button_data_buffer,
+ f1a->button_bitmask_size);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read button data registers\n",
+ __func__);
+ return retval;
+ }
+
+ data = f1a->button_data_buffer;
+
+ mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+ for (button = 0; button < f1a->valid_button_count; button++) {
+ index = button / 8;
+ shift = button % 8;
+ status = ((data[index] >> shift) & MASK_1BIT);
+
+ /* Only report state transitions. */
+ if (current_status[button] == status)
+ continue;
+ else
+ current_status[button] = status;
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Button %d (code %d) ->%d\n",
+ __func__, button,
+ f1a->button_map[button],
+ status);
+#ifdef NO_0D_WHILE_2D
+ if (rmi4_data->fingers_on_2d == false) {
+ if (status == 1) {
+ before_2d_status[button] = 1;
+ } else {
+ if (while_2d_status[button] == 1) {
+ while_2d_status[button] = 0;
+ continue;
+ } else {
+ before_2d_status[button] = 0;
+ }
+ }
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (before_2d_status[button] == 1) {
+ before_2d_status[button] = 0;
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (status == 1)
+ while_2d_status[button] = 1;
+ else
+ while_2d_status[button] = 0;
+ }
+ }
+#else
+ touch_count++;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+#endif
+ }
+
+ if (touch_count)
+ input_sync(rmi4_data->input_dev);
+
+ mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+ return retval;
+}
+
+/*
+ * Dispatch an interrupt to the matching function-specific reporter.
+ * For the 2D functions (F11/F12), also track whether any fingers are
+ * currently on the surface so the 0D button handler can suppress
+ * conflicting button events.
+ */
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ unsigned char touch_count_2d;
+
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Function %02x reporting\n",
+ __func__, fhandler->fn_number);
+
+ switch (fhandler->fn_number) {
+ case SYNAPTICS_RMI4_F11:
+ touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+ fhandler);
+
+ if (touch_count_2d)
+ rmi4_data->fingers_on_2d = true;
+ else
+ rmi4_data->fingers_on_2d = false;
+ break;
+ case SYNAPTICS_RMI4_F12:
+ touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+ fhandler);
+
+ if (touch_count_2d)
+ rmi4_data->fingers_on_2d = true;
+ else
+ rmi4_data->fingers_on_2d = false;
+ break;
+ case SYNAPTICS_RMI4_F1A:
+ synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+ break;
+#ifdef USE_DATA_SERVER
+ case SYNAPTICS_RMI4_F21:
+ /* Hand F21 data off to the registered userspace server. */
+ if (synad_pid)
+ send_sig_info(SIGIO, &interrupt_signal, synad_task);
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
+/*
+ * Top-level interrupt service: read the F01 device status (data[0]) and
+ * interrupt status registers (data[1..]) in one transaction, recover
+ * from CRC-in-progress and spontaneous resets, then — when @report is
+ * true — dispatch each flagged interrupt source to its handler and to
+ * any registered expandable-function attention callbacks.
+ * Returns 0/positive on success or a negative errno on register errors.
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
+ bool report)
+{
+ int retval;
+ unsigned char data[MAX_INTR_REGISTERS + 1];
+ unsigned char *intr = &data[1]; /* interrupt status follows device status */
+ bool was_in_bl_mode;
+ struct synaptics_rmi4_f01_device_status status;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ /*
+ * Get interrupt status information from F01 Data1 register to
+ * determine the source(s) that are flagging the interrupt.
+ */
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ data,
+ rmi4_data->num_of_intr_regs + 1);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read interrupt status\n",
+ __func__);
+ return retval;
+ }
+
+ /* Wait out a firmware CRC check, then re-read the device status. */
+ status.data[0] = data[0];
+ if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ retval = synaptics_rmi4_check_status(rmi4_data,
+ &was_in_bl_mode);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to check status\n",
+ __func__);
+ return retval;
+ }
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read device status\n",
+ __func__);
+ return retval;
+ }
+ }
+ if (status.unconfigured && !status.flash_prog) {
+ pr_notice("%s: spontaneous reset detected\n", __func__);
+ retval = synaptics_rmi4_reinit_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to reinit device\n",
+ __func__);
+ }
+ }
+
+ if (!report)
+ return retval;
+
+ /*
+ * Traverse the function handler list and service the source(s)
+ * of the interrupt accordingly.
+ */
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->num_of_data_sources) {
+ if (fhandler->intr_mask &
+ intr[fhandler->intr_reg_num]) {
+ synaptics_rmi4_report_touch(rmi4_data,
+ fhandler);
+ }
+ }
+ }
+ }
+
+ /* Notify expandable-function modules that want attention callbacks. */
+ mutex_lock(&exp_data.mutex);
+ if (!list_empty(&exp_data.list)) {
+ list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+ if (!exp_fhandler->insert &&
+ !exp_fhandler->remove &&
+ (exp_fhandler->exp_fn->attn != NULL))
+ exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
+ }
+ }
+ mutex_unlock(&exp_data.mutex);
+
+ return retval;
+}
+
+/*
+ * Threaded IRQ handler: service the sensor only while the attention
+ * line is still asserted (debounces spurious wakeups).
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+ struct synaptics_rmi4_data *rmi4_data = data;
+ const struct synaptics_dsx_board_data *bdata =
+ rmi4_data->hw_if->board_data;
+
+ if (gpio_get_value(bdata->irq_gpio) == bdata->irq_on_state)
+ synaptics_rmi4_sensor_report(rmi4_data, true);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Program the F01 interrupt enable registers: write the cached mask
+ * when enabling, or zero when disabling.  Registers whose cached mask
+ * is zero are left untouched.  Returns 0 on success or the first
+ * negative errno from a register write.
+ */
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ int retval = 0;
+ unsigned char ii;
+ unsigned char zero = 0x00;
+ unsigned char *intr_mask = rmi4_data->intr_mask;
+ unsigned short intr_addr;
+
+ for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+ if (intr_mask[ii] == 0x00)
+ continue;
+
+ intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ intr_addr,
+ enable ? &(intr_mask[ii]) : &zero,
+ enable ? sizeof(intr_mask[ii]) : sizeof(zero));
+ if (retval < 0)
+ return retval;
+ }
+
+ return retval;
+}
+
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+ bool enable, bool attn_only)
+{
+ int retval = 0;
+ unsigned char data[MAX_INTR_REGISTERS];
+ const struct synaptics_dsx_board_data *bdata =
+ rmi4_data->hw_if->board_data;
+
+ mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+ if (attn_only) {
+ retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+ goto exit;
+ }
+
+ if (enable) {
+ if (rmi4_data->irq_enabled) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Interrupt already enabled\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = synaptics_rmi4_int_enable(rmi4_data, false);
+ if (retval < 0)
+ goto exit;
+
+ /* Clear interrupts */
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr + 1,
+ data,
+ rmi4_data->num_of_intr_regs);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read interrupt status\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = request_threaded_irq(rmi4_data->irq, NULL,
+ synaptics_rmi4_irq, bdata->irq_flags,
+ PLATFORM_DRIVER_NAME, rmi4_data);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to create irq thread\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = synaptics_rmi4_int_enable(rmi4_data, true);
+ if (retval < 0)
+ goto exit;
+
+ rmi4_data->irq_enabled = true;
+ } else {
+ if (rmi4_data->irq_enabled) {
+ disable_irq(rmi4_data->irq);
+ free_irq(rmi4_data->irq, rmi4_data);
+ rmi4_data->irq_enabled = false;
+ }
+ }
+
+exit:
+ mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+ return retval;
+}
+
/*
 * Derive the interrupt status register index and bit mask belonging to
 * this function, based on how many interrupt sources were assigned to
 * previously scanned functions (intr_count) and how many sources this
 * function contributes (fd->intr_src_count).
 */
static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	unsigned char ii;
	unsigned char intr_offset;

	/*
	 * NOTE(review): when intr_count is a nonzero multiple of 8, this
	 * yields register (intr_count + 7) / 8 - 1 with bit offset 0, i.e.
	 * the first bit of the preceding status register -- confirm this
	 * matches the RMI4 interrupt source numbering.
	 */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < (fd->intr_src_count + intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;
}
+
+static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ fhandler->fn_number = fd->fn_number;
+ fhandler->num_of_data_sources = fd->intr_src_count;
+ fhandler->data = NULL;
+ fhandler->extra = NULL;
+
+ synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+ rmi4_data->f01_query_base_addr = fd->query_base_addr;
+ rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
+ rmi4_data->f01_data_base_addr = fd->data_base_addr;
+ rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;
+
+ return 0;
+}
+
/*
 * Initialize the handler for F11 (legacy 2D touch reporting).
 *
 * Reads the F11 query block to learn the supported finger count and
 * optional features, reads control registers 6-9 for the maximum x/y
 * coordinates (swapping them if the board data requests swapped axes),
 * and assigns this function's interrupt mask.  When query 27 reports a
 * wakeup gesture, the variable-layout data registers are walked to
 * locate the data38 offset, which is cached in the handler's extra
 * data.  Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	int temp;
	unsigned char offset;
	unsigned char fingers_supported;
	struct synaptics_rmi4_f11_extra_data *extra_data;
	struct synaptics_rmi4_f11_query_0_5 query_0_5;
	struct synaptics_rmi4_f11_query_7_8 query_7_8;
	struct synaptics_rmi4_f11_query_9 query_9;
	struct synaptics_rmi4_f11_query_12 query_12;
	struct synaptics_rmi4_f11_query_27 query_27;
	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->extra\n",
				__func__);
		return -ENOMEM;
	}
	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base,
			query_0_5.data,
			sizeof(query_0_5.data));
	if (retval < 0)
		return retval;

	/* Maximum number of fingers supported */
	if (query_0_5.num_of_fingers <= 4)
		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
	else if (query_0_5.num_of_fingers == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.ctrl_base + 6,
			control_6_9.data,
			sizeof(control_6_9.data));
	if (retval < 0)
		return retval;

	/* Maximum x and y */
	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
			(control_6_9.sensor_max_x_pos_11_8 << 8);
	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
			(control_6_9.sensor_max_y_pos_11_8 << 8);
	dev_dbg(rmi4_data->pdev->dev.parent,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	if (bdata->swap_axes) {
		temp = rmi4_data->sensor_max_x;
		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
		rmi4_data->sensor_max_y = temp;
	}

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	fhandler->data = NULL;

	/*
	 * Walk the optional query registers; each present register shifts
	 * the offset of the ones that follow it.
	 */
	offset = sizeof(query_0_5.data);

	/* query 6 */
	if (query_0_5.has_rel)
		offset += 1;

	/* queries 7 8 */
	if (query_0_5.has_gestures) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_7_8.data,
				sizeof(query_7_8.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_7_8.data);
	}

	/* query 9 */
	if (query_0_5.has_query_9) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_9.data,
				sizeof(query_9.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_9.data);
	}

	/* query 10 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += 1;

	/* query 11 */
	if (query_0_5.has_query_11)
		offset += 1;

	/* query 12 */
	if (query_0_5.has_query_12) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_12.data,
				sizeof(query_12.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_12.data);
	}

	/* query 13 */
	if (query_0_5.has_jitter_filter)
		offset += 1;

	/* query 14 */
	if (query_0_5.has_query_12 && query_12.has_general_information_2)
		offset += 1;

	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
	if (query_0_5.has_query_12 && query_12.has_physical_properties)
		offset += 12;

	/* query 27 */
	if (query_0_5.has_query_27) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_27.data,
				sizeof(query_27.data));
		if (retval < 0)
			return retval;

		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
	}

	/* Without wakeup gesture support there is no need to find data38 */
	if (!rmi4_data->f11_wakeup_gesture)
		return retval;

	/*
	 * Walk the variable-layout data registers to locate data38 (the
	 * wakeup gesture register); each present data register shifts the
	 * offsets of those that follow it.
	 */
	/* data 0 */
	fingers_supported = fhandler->num_of_data_points;
	offset = (fingers_supported + 3) / 4;

	/* data 1 2 3 4 5 */
	offset += 5 * fingers_supported;

	/* data 6 7 */
	if (query_0_5.has_rel)
		offset += 2 * fingers_supported;

	/* data 8 */
	if (query_0_5.has_gestures && query_7_8.data[0])
		offset += 1;

	/* data 9 */
	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
		offset += 1;

	/* data 10 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_pinch || query_7_8.has_flick))
		offset += 1;

	/* data 11 12 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_flick || query_7_8.has_rotate))
		offset += 2;

	/* data 13 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += (fingers_supported + 3) / 4;

	/* data 14 15 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones ||
			query_7_8.has_multi_finger_scroll ||
			query_7_8.has_chiral_scroll))
		offset += 2;

	/* data 16 17 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones &&
			query_7_8.individual_scroll_zones))
		offset += 2;

	/* data 18 19 20 21 22 23 24 25 26 27 */
	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
		offset += 10 * fingers_supported;

	/* data 28 */
	if (query_0_5.has_bending_correction ||
			query_0_5.has_large_object_suppression)
		offset += 1;

	/* data 29 30 31 */
	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
		offset += 3;

	/* data 32 */
	if (query_0_5.has_query_12 &&
			query_12.has_small_object_detection_tuning)
		offset += 1;

	/* data 33 34 */
	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
		offset += 2;

	/* data 35 */
	if (query_0_5.has_query_12 && query_12.has_8bit_w)
		offset += fingers_supported;

	/* data 36 */
	if (query_0_5.has_bending_correction)
		offset += 1;

	/* data 37 */
	if (query_0_5.has_query_27 && query_27.has_data_37)
		offset += 1;

	/* data 38 */
	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
		extra_data->data38_offset = offset;

	return retval;
}
+
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short ctrl28)
+{
+ int retval;
+ static unsigned short ctrl_28_address;
+
+ if (ctrl28)
+ ctrl_28_address = ctrl28;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ ctrl_28_address,
+ &rmi4_data->report_enable,
+ sizeof(rmi4_data->report_enable));
+ if (retval < 0)
+ return retval;
+
+ return retval;
+}
+
/*
 * Determine whether a subpacket of a packet register is present.
 *
 * presence points at the F12 presence register block (presence[0] holds
 * the size of the structure register block; the following bytes carry
 * one presence bit per register).  The structure register block, read
 * from query_base + structure_offset, describes each present register
 * as a size byte (0x00 escapes to a 3-byte form) followed by subpacket
 * presence bytes whose top bit marks continuation.
 *
 * Returns 1 if subpacket 'sub' of register 'reg' is present, 0 if not,
 * or a negative errno on failure.
 */
static int synaptics_rmi4_f12_find_sub(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		unsigned char *presence, unsigned char presence_size,
		unsigned char structure_offset, unsigned char reg,
		unsigned char sub)
{
	int retval;
	unsigned char cnt;
	unsigned char regnum;
	unsigned char bitnum;
	unsigned char p_index;
	unsigned char s_index;
	unsigned char offset;
	unsigned char max_reg;
	unsigned char *structure;

	/* presence[0] is the structure size, so usable bits start at [1] */
	max_reg = (presence_size - 1) * 8 - 1;

	if (reg > max_reg) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Register number (%d) over limit\n",
				__func__, reg);
		return -EINVAL;
	}

	p_index = reg / 8 + 1;
	bitnum = reg % 8;
	if ((presence[p_index] & (1 << bitnum)) == 0x00) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Register %d is not present\n",
				__func__, reg);
		return -EINVAL;
	}

	structure = kmalloc(presence[0], GFP_KERNEL);
	if (!structure) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for structure register\n",
				__func__);
		return -ENOMEM;
	}

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + structure_offset,
			structure,
			presence[0]);
	if (retval < 0)
		goto exit;

	s_index = 0;

	/* Skip the structure entries of all present registers before reg */
	for (regnum = 0; regnum < reg; regnum++) {
		p_index = regnum / 8 + 1;
		bitnum = regnum % 8;
		if ((presence[p_index] & (1 << bitnum)) == 0x00)
			continue;

		/* Size byte 0x00 escapes to a 3-byte size encoding */
		if (structure[s_index] == 0x00)
			s_index += 3;
		else
			s_index++;

		/* Skip continuation subpacket bytes (top bit set) */
		while (structure[s_index] & ~MASK_7BIT)
			s_index++;

		s_index++;
	}

	/* s_index now points at reg's size byte; subpacket bits are 7 wide */
	cnt = 0;
	s_index++;
	offset = sub / 7;
	bitnum = sub % 7;

	do {
		if (cnt == offset) {
			if (structure[s_index + cnt] & (1 << bitnum))
				retval = 1;
			else
				retval = 0;
			goto exit;
		}
		cnt++;
	} while (structure[s_index + cnt - 1] & ~MASK_7BIT);

	/* Ran out of subpacket bytes before reaching 'sub' */
	retval = 0;

exit:
	kfree(structure);

	return retval;
}
+
/*
 * Initialize the handler for F12 (2D touch reporting).
 *
 * Reads queries 5 and 8 to compute the offsets of the optional control
 * and data registers (each present register shifts those that follow),
 * reads ctrl23 for the supported object count and stylus/eraser flags,
 * programs the report enable bits via ctrl28, reads the maximum x/y
 * coordinates from ctrl8 (regular sensor) or ctrl31 (wedge sensor),
 * caches wakeup gesture and (optionally) pressure reporting offsets,
 * assigns the interrupt mask, and allocates the finger data buffer.
 * Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval = 0;
	int temp;
	unsigned char subpacket;
	unsigned char ctrl_23_size;
	unsigned char size_of_2d_data;
	unsigned char size_of_query5;
	unsigned char size_of_query8;
	unsigned char ctrl_8_offset;
	unsigned char ctrl_20_offset;
	unsigned char ctrl_23_offset;
	unsigned char ctrl_28_offset;
	unsigned char ctrl_31_offset;
	unsigned char ctrl_58_offset;
	unsigned char num_of_fingers;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
	struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
	struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
	struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
	struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
	struct synaptics_rmi4_f12_ctrl_58 *ctrl_58 = NULL;
	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->extra\n",
				__func__);
		return -ENOMEM;
	}
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
	if (!query_5) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for query_5\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	query_8 = kzalloc(sizeof(*query_8), GFP_KERNEL);
	if (!query_8) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for query_8\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_8 = kzalloc(sizeof(*ctrl_8), GFP_KERNEL);
	if (!ctrl_8) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_8\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_23 = kzalloc(sizeof(*ctrl_23), GFP_KERNEL);
	if (!ctrl_23) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_23\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_31 = kzalloc(sizeof(*ctrl_31), GFP_KERNEL);
	if (!ctrl_31) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_31\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_58 = kzalloc(sizeof(*ctrl_58), GFP_KERNEL);
	if (!ctrl_58) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_58\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	/* Query 4 holds the size of query 5 (the control presence block) */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 4,
			&size_of_query5,
			sizeof(size_of_query5));
	if (retval < 0)
		goto exit;

	if (size_of_query5 > sizeof(query_5->data))
		size_of_query5 = sizeof(query_5->data);
	memset(query_5->data, 0x00, sizeof(query_5->data));

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 5,
			query_5->data,
			size_of_query5);
	if (retval < 0)
		goto exit;

	/*
	 * Control register offsets: each control register that the
	 * presence bits report as present occupies one slot, shifting
	 * the offsets of all the registers after it.
	 */
	ctrl_8_offset = query_5->ctrl0_is_present +
			query_5->ctrl1_is_present +
			query_5->ctrl2_is_present +
			query_5->ctrl3_is_present +
			query_5->ctrl4_is_present +
			query_5->ctrl5_is_present +
			query_5->ctrl6_is_present +
			query_5->ctrl7_is_present;

	ctrl_20_offset = ctrl_8_offset +
			query_5->ctrl8_is_present +
			query_5->ctrl9_is_present +
			query_5->ctrl10_is_present +
			query_5->ctrl11_is_present +
			query_5->ctrl12_is_present +
			query_5->ctrl13_is_present +
			query_5->ctrl14_is_present +
			query_5->ctrl15_is_present +
			query_5->ctrl16_is_present +
			query_5->ctrl17_is_present +
			query_5->ctrl18_is_present +
			query_5->ctrl19_is_present;

	ctrl_23_offset = ctrl_20_offset +
			query_5->ctrl20_is_present +
			query_5->ctrl21_is_present +
			query_5->ctrl22_is_present;

	ctrl_28_offset = ctrl_23_offset +
			query_5->ctrl23_is_present +
			query_5->ctrl24_is_present +
			query_5->ctrl25_is_present +
			query_5->ctrl26_is_present +
			query_5->ctrl27_is_present;

	ctrl_31_offset = ctrl_28_offset +
			query_5->ctrl28_is_present +
			query_5->ctrl29_is_present +
			query_5->ctrl30_is_present;

	ctrl_58_offset = ctrl_31_offset +
			query_5->ctrl31_is_present +
			query_5->ctrl32_is_present +
			query_5->ctrl33_is_present +
			query_5->ctrl34_is_present +
			query_5->ctrl35_is_present +
			query_5->ctrl36_is_present +
			query_5->ctrl37_is_present +
			query_5->ctrl38_is_present +
			query_5->ctrl39_is_present +
			query_5->ctrl40_is_present +
			query_5->ctrl41_is_present +
			query_5->ctrl42_is_present +
			query_5->ctrl43_is_present +
			query_5->ctrl44_is_present +
			query_5->ctrl45_is_present +
			query_5->ctrl46_is_present +
			query_5->ctrl47_is_present +
			query_5->ctrl48_is_present +
			query_5->ctrl49_is_present +
			query_5->ctrl50_is_present +
			query_5->ctrl51_is_present +
			query_5->ctrl52_is_present +
			query_5->ctrl53_is_present +
			query_5->ctrl54_is_present +
			query_5->ctrl55_is_present +
			query_5->ctrl56_is_present +
			query_5->ctrl57_is_present;

	/* ctrl23 is at least 2 bytes; subpackets 2-4 add one byte each */
	ctrl_23_size = 2;
	for (subpacket = 2; subpacket <= 4; subpacket++) {
		retval = synaptics_rmi4_f12_find_sub(rmi4_data,
				fhandler, query_5->data, sizeof(query_5->data),
				6, 23, subpacket);
		if (retval == 1)
			ctrl_23_size++;
		else if (retval < 0)
			goto exit;

	}

	/* ctrl20 subpacket 0 indicates wakeup gesture support */
	retval = synaptics_rmi4_f12_find_sub(rmi4_data,
			fhandler, query_5->data, sizeof(query_5->data),
			6, 20, 0);
	if (retval == 1)
		rmi4_data->set_wakeup_gesture = 2;
	else if (retval == 0)
		rmi4_data->set_wakeup_gesture = 0;
	else if (retval < 0)
		goto exit;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_23_offset,
			ctrl_23->data,
			ctrl_23_size);
	if (retval < 0)
		goto exit;

	/* Maximum number of fingers supported */
	fhandler->num_of_data_points = min_t(unsigned char,
			ctrl_23->max_reported_objects,
			(unsigned char)F12_FINGERS_TO_SUPPORT);

	num_of_fingers = fhandler->num_of_data_points;
	rmi4_data->num_of_fingers = num_of_fingers;

	rmi4_data->stylus_enable = ctrl_23->stylus_enable;
	rmi4_data->eraser_enable = ctrl_23->eraser_enable;

	/* Query 7 holds the size of query 8 (the data presence block) */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 7,
			&size_of_query8,
			sizeof(size_of_query8));
	if (retval < 0)
		goto exit;

	if (size_of_query8 > sizeof(query_8->data))
		size_of_query8 = sizeof(query_8->data);
	memset(query_8->data, 0x00, sizeof(query_8->data));

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 8,
			query_8->data,
			size_of_query8);
	if (retval < 0)
		goto exit;

	/* Determine the presence of the Data0 register */
	extra_data->data1_offset = query_8->data0_is_present;

	/* data15 (finger presence bitmap) offset and size, if present */
	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
		extra_data->data15_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present +
				query_8->data4_is_present +
				query_8->data5_is_present +
				query_8->data6_is_present +
				query_8->data7_is_present +
				query_8->data8_is_present +
				query_8->data9_is_present +
				query_8->data10_is_present +
				query_8->data11_is_present +
				query_8->data12_is_present +
				query_8->data13_is_present +
				query_8->data14_is_present;
		extra_data->data15_size = (num_of_fingers + 7) / 8;
	} else {
		extra_data->data15_size = 0;
	}

#ifdef REPORT_2D_PRESSURE
	/* data29 (per-finger force) offset/size and force range via ctrl58 */
	if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
		extra_data->data29_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present +
				query_8->data4_is_present +
				query_8->data5_is_present +
				query_8->data6_is_present +
				query_8->data7_is_present +
				query_8->data8_is_present +
				query_8->data9_is_present +
				query_8->data10_is_present +
				query_8->data11_is_present +
				query_8->data12_is_present +
				query_8->data13_is_present +
				query_8->data14_is_present +
				query_8->data15_is_present +
				query_8->data16_is_present +
				query_8->data17_is_present +
				query_8->data18_is_present +
				query_8->data19_is_present +
				query_8->data20_is_present +
				query_8->data21_is_present +
				query_8->data22_is_present +
				query_8->data23_is_present +
				query_8->data24_is_present +
				query_8->data25_is_present +
				query_8->data26_is_present +
				query_8->data27_is_present +
				query_8->data28_is_present;
		extra_data->data29_size = 0;
		for (subpacket = 0; subpacket <= num_of_fingers; subpacket++) {
			retval = synaptics_rmi4_f12_find_sub(rmi4_data,
					fhandler, query_8->data,
					sizeof(query_8->data),
					9, 29, subpacket);
			if (retval == 1)
				extra_data->data29_size += 2;
			else if (retval < 0)
				goto exit;
		}
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_58_offset,
				ctrl_58->data,
				sizeof(ctrl_58->data));
		if (retval < 0)
			goto exit;
		rmi4_data->force_min =
				(int)(ctrl_58->min_force_lsb << 0) |
				(int)(ctrl_58->min_force_msb << 8);
		rmi4_data->force_max =
				(int)(ctrl_58->max_force_lsb << 0) |
				(int)(ctrl_58->max_force_msb << 8);
		rmi4_data->report_pressure = true;
	} else {
		extra_data->data29_size = 0;
		rmi4_data->report_pressure = false;
	}
#endif

	rmi4_data->report_enable = RPT_DEFAULT;
#ifdef REPORT_2D_Z
	rmi4_data->report_enable |= RPT_Z;
#endif
#ifdef REPORT_2D_W
	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
#endif

	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_28_offset);
	if (retval < 0)
		goto exit;

	if (query_5->ctrl8_is_present) {
		rmi4_data->wedge_sensor = false;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_8_offset,
				ctrl_8->data,
				sizeof(ctrl_8->data));
		if (retval < 0)
			goto exit;

		/* Maximum x and y */
		rmi4_data->sensor_max_x =
				((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
				((unsigned int)ctrl_8->max_x_coord_msb << 8);
		rmi4_data->sensor_max_y =
				((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
				((unsigned int)ctrl_8->max_y_coord_msb << 8);

		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
	} else {
		rmi4_data->wedge_sensor = true;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_31_offset,
				ctrl_31->data,
				sizeof(ctrl_31->data));
		if (retval < 0)
			goto exit;

		/* Maximum x and y */
		rmi4_data->sensor_max_x =
				((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
				((unsigned int)ctrl_31->max_x_coord_msb << 8);
		rmi4_data->sensor_max_y =
				((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
				((unsigned int)ctrl_31->max_y_coord_msb << 8);

		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
	}

	dev_dbg(rmi4_data->pdev->dev.parent,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	if (bdata->swap_axes) {
		temp = rmi4_data->sensor_max_x;
		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
		rmi4_data->sensor_max_y = temp;
	}

	rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
	if (rmi4_data->f12_wakeup_gesture) {
		extra_data->ctrl20_offset = ctrl_20_offset;
		extra_data->data4_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present;
	}

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	/* Allocate memory for finger data storage space */
	fhandler->data_size = num_of_fingers * size_of_2d_data;
	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
	if (!fhandler->data) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->data\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

exit:
	kfree(query_5);
	kfree(query_8);
	kfree(ctrl_8);
	kfree(ctrl_23);
	kfree(ctrl_31);
	kfree(ctrl_58);

	return retval;
}
+
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ struct synaptics_rmi4_f1a_handle *f1a;
+
+ f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+ if (!f1a) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for function handle\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ fhandler->data = (void *)f1a;
+ fhandler->extra = NULL;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ fhandler->full_addr.query_base,
+ f1a->button_query.data,
+ sizeof(f1a->button_query.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read query registers\n",
+ __func__);
+ return retval;
+ }
+
+ f1a->max_count = f1a->button_query.max_button_count + 1;
+
+ f1a->button_control.txrx_map = kzalloc(f1a->max_count * 2, GFP_KERNEL);
+ if (!f1a->button_control.txrx_map) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for tx rx mapping\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ f1a->button_bitmask_size = (f1a->max_count + 7) / 8;
+
+ f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+ sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+ if (!f1a->button_data_buffer) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for data buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ f1a->button_map = kcalloc(f1a->max_count,
+ sizeof(*(f1a->button_map)), GFP_KERNEL);
+ if (!f1a->button_map) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for button map\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/*
 * Populate the F1A button map from board data and device queries.
 *
 * Reads the tx/rx electrode mapping when the device reports one, checks
 * query 4 for external-AFE button support, and copies the platform's
 * capacitive button key codes into the handle (clamped to the smaller
 * of the platform's and the device's button counts).  Returns 0 on
 * success, -ENODEV when the board data lacks a button map, or a
 * negative errno from register access.
 */
static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char ii;
	unsigned char offset = 0;
	struct synaptics_rmi4_f1a_query_4 query_4;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;

	rmi4_data->valid_button_count = f1a->valid_button_count;

	/* Optional controls preceding the tx/rx map shift its offset */
	offset = f1a->button_query.has_general_control +
			f1a->button_query.has_interrupt_enable +
			f1a->button_query.has_multibutton_select;

	if (f1a->button_query.has_tx_rx_map) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + offset,
				f1a->button_control.txrx_map,
				f1a->max_count * 2);
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read tx rx mapping\n",
					__func__);
			return retval;
		}

		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
	}

	if (f1a->button_query.has_query4) {
		/* Query 4 follows queries 0, 1 and the optional 2 and 3 */
		offset = 2 + f1a->button_query.has_query2 +
				f1a->button_query.has_query3;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_4.data,
				sizeof(query_4.data));
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read button features 4\n",
					__func__);
			return retval;
		}

		if (query_4.has_ctrl24)
			rmi4_data->external_afe_buttons = true;
		else
			rmi4_data->external_afe_buttons = false;
	}

	if (!bdata->cap_button_map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: cap_button_map is NULL in board file\n",
				__func__);
		return -ENODEV;
	} else if (!bdata->cap_button_map->map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Button map is missing in board file\n",
				__func__);
		return -ENODEV;
	} else {
		/* Use the smaller of platform and device button counts */
		if (bdata->cap_button_map->nbuttons != f1a->max_count) {
			f1a->valid_button_count = min(f1a->max_count,
					bdata->cap_button_map->nbuttons);
		} else {
			f1a->valid_button_count = f1a->max_count;
		}

		for (ii = 0; ii < f1a->valid_button_count; ii++)
			f1a->button_map[ii] = bdata->cap_button_map->map[ii];

		rmi4_data->valid_button_count = f1a->valid_button_count;
	}

	return 0;
}
+
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+ if (f1a) {
+ kfree(f1a->button_control.txrx_map);
+ kfree(f1a->button_data_buffer);
+ kfree(f1a->button_map);
+ kfree(f1a);
+ fhandler->data = NULL;
+ }
+}
+
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ int retval;
+
+ fhandler->fn_number = fd->fn_number;
+ fhandler->num_of_data_sources = fd->intr_src_count;
+
+ synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+ retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+ if (retval < 0)
+ goto error_exit;
+
+ retval = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+ if (retval < 0)
+ goto error_exit;
+
+ rmi4_data->button_0d_enabled = 1;
+
+ return 0;
+
+error_exit:
+ synaptics_rmi4_f1a_kfree(fhandler);
+
+ return retval;
+}
+
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_fn *fhandler_temp;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry_safe(fhandler,
+ fhandler_temp,
+ &rmi->support_fn_list,
+ link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+ synaptics_rmi4_f1a_kfree(fhandler);
+ } else {
+ kfree(fhandler->extra);
+ kfree(fhandler->data);
+ }
+ list_del(&fhandler->link);
+ kfree(fhandler);
+ }
+ }
+ INIT_LIST_HEAD(&rmi->support_fn_list);
+}
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+ bool *was_in_bl_mode)
+{
+ int retval;
+ int timeout = CHECK_STATUS_TIMEOUT_MS;
+ struct synaptics_rmi4_f01_device_status status;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0)
+ return retval;
+
+ while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ if (timeout > 0)
+ msleep(20);
+ else
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0)
+ return retval;
+
+ timeout -= 20;
+ }
+
+ if (timeout != CHECK_STATUS_TIMEOUT_MS)
+ *was_in_bl_mode = true;
+
+ if (status.flash_prog == 1) {
+ rmi4_data->flash_prog_mode = true;
+ pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+ __func__,
+ status.status_code);
+ } else {
+ rmi4_data->flash_prog_mode = false;
+ }
+
+ return 0;
+}
+
+static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char device_ctrl;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set configured\n",
+ __func__);
+ return retval;
+ }
+
+ rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
+ device_ctrl |= CONFIGURED;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set configured\n",
+ __func__);
+ }
+
+ return retval;
+}
+
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+ struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+ *fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL);
+ if (!(*fhandler))
+ return -ENOMEM;
+
+ (*fhandler)->full_addr.data_base =
+ (rmi_fd->data_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.ctrl_base =
+ (rmi_fd->ctrl_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.cmd_base =
+ (rmi_fd->cmd_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.query_base =
+ (rmi_fd->query_base_addr |
+ (page_number << 8));
+
+ return 0;
+}
+
+/*
+ * Walk the Page Description Table (PDT) on every serviced page, allocate
+ * and initialize a handler for each supported RMI function found, then
+ * read the F01 query registers to populate device identity, build ID,
+ * firmware ID and the per-function interrupt masks.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+/*
+ * Restart point used when F01 reports the device was in bootloader mode.
+ * NOTE(review): handlers already added to support_fn_list are not freed
+ * before the list head is reinitialized below, which looks like a leak
+ * on rescan -- confirm against the teardown paths.
+ */
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		/* The PDT is scanned downward from PDT_START toward PDT_END */
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* Drop the page bits again for the next iteration */
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			/* A zero function number terminates the table */
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				/* F01 status tells us if the firmware fell
+				 * back to the bootloader */
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+#ifdef IGNORE_FN_INIT_FAILURE
+					/* 0D button init failure is tolerated
+					 * when this build option is set */
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+#ifdef USE_DATA_SERVER
+			case SYNAPTICS_RMI4_F21:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				fhandler->fn_number = rmi_fd.fn_number;
+				fhandler->num_of_data_sources =
+						rmi_fd.intr_src_count;
+
+				synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
+						intr_count);
+				break;
+#endif
+			case SYNAPTICS_RMI4_F35:
+				/* Presence of F35 indicates the device is in
+				 * microbootloader (recovery) mode */
+				f35found = true;
+				break;
+#ifdef F51_DISCRETE_FORCE
+			case SYNAPTICS_RMI4_F51:
+				rmi4_data->f51_query_base_addr =
+						rmi_fd.query_base_addr |
+						(page_number << 8);
+				break;
+#endif
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	/* No F01: only acceptable when F35 says we are in recovery mode */
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+/* Jumped to directly when F01 reports flash programming mode */
+flash_prog_mode:
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	/* Manufacturer ID 1 is Synaptics; anything else is only warned about */
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	/* Build ID is three bytes, least significant byte first */
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * Request (config == true) or free (config == false) a GPIO.  When
+ * requesting, the line is configured as input (dir == 0) or as output
+ * driven to the given initial state.
+ *
+ * Returns 0 on success or a negative errno from the gpio core.
+ */
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	char buf[16];
+
+	if (config) {
+		/*
+		 * Bound the label by the actual buffer size; the original
+		 * PAGE_SIZE bound permitted writes past the 16 byte buffer.
+		 * NOTE(review): the trailing '\n' in the label looks
+		 * unintentional -- confirm before changing it.
+		 */
+		snprintf(buf, sizeof(buf), "dsx_gpio_%u\n", gpio);
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
+/*
+ * Program the input device capabilities: 2D absolute position axes,
+ * optional touch width and pressure axes, multi-touch slots, 0D buttons
+ * (F1A), virtual keys from the board map, and the wakeup gesture key.
+ * A snapshot of the advertised parameters is kept in input_settings so
+ * a later rebuild can detect configuration changes.
+ */
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+	/* Record what was advertised for later rebuild comparison */
+	rmi4_data->input_settings.sensor_max_x = rmi4_data->sensor_max_x;
+	rmi4_data->input_settings.sensor_max_y = rmi4_data->sensor_max_y;
+	rmi4_data->input_settings.max_touch_width = rmi4_data->max_touch_width;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, rmi4_data->force_min,
+				rmi4_data->force_max, 0, 0);
+
+		rmi4_data->input_settings.force_min = rmi4_data->force_min;
+		rmi4_data->input_settings.force_max = rmi4_data->force_max;
+	}
+#elif defined(F51_DISCRETE_FORCE)
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_PRESSURE, 0,
+			FORCE_LEVEL_MAX, 0, 0);
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	rmi4_data->input_settings.num_of_fingers = rmi4_data->num_of_fingers;
+
+	/* Locate the F1A (0D buttons) handler, if one was discovered */
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+
+		rmi4_data->input_settings.valid_button_count =
+				f1a->valid_button_count;
+	}
+
+	/* Virtual keys: each button spans 5 map entries; the first entry
+	 * is used as the keycode here -- presumably per board-data layout,
+	 * TODO confirm against the virtual key map parser */
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+}
+
+/*
+ * Allocate and register the touch input device (and, when enabled, the
+ * stylus input device), querying the RMI device first to discover its
+ * capabilities.
+ *
+ * Returns 0 on success or a negative errno; on failure all partially
+ * acquired resources are released.
+ */
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	/* Board data may clip the reported Y range (e.g. for 0D area) */
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	rmi4_data->input_settings.stylus_enable = rmi4_data->stylus_enable;
+	rmi4_data->input_settings.eraser_enable = rmi4_data->eraser_enable;
+
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+err_register_stylus:
+	/* Registration failed, so the allocated stylus device must be
+	 * freed here; clearing the pointer alone would leak it. */
+	input_free_device(rmi4_data->stylus_dev);
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	/* input_dev is NULL when arriving from err_stylus_device;
+	 * input_free_device() tolerates a NULL argument. */
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
+/*
+ * Claim and sequence the board GPIOs: the attention (IRQ) line as an
+ * input, and the optional power and reset lines as outputs initially
+ * driven to their inactive states.  The device is then powered up and
+ * a reset pulse is applied, honoring the board-specified delays.
+ *
+ * Returns 0 on success or a negative errno; on failure any GPIOs
+ * already claimed are freed again.
+ */
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	/* Power GPIO is optional; negative means not provided */
+	if (bdata->power_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	/* Reset GPIO is optional; negative means not provided */
+	if (bdata->reset_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	/* Power up, then pulse reset active->inactive with board delays */
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
+/*
+ * Obtain the pinctrl handle and look up the active/suspend/release pin
+ * states.  Failure to find the optional "release" state is logged but
+ * deliberately non-fatal (the function still returns 0 in that case).
+ *
+ * Returns 0 on success; on failure the pinctrl handle is released and
+ * ts_pinctrl is cleared so callers can treat pinctrl as absent.
+ */
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+		retval = PTR_ERR(rmi4_data->ts_pinctrl);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	rmi4_data->pinctrl_state_active
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_active);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_suspend
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	/* Release state is optional: a lookup failure only logs */
+	rmi4_data->pinctrl_state_release
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+	rmi4_data->ts_pinctrl = NULL;
+	return retval;
+}
+
+
+/*
+ * Acquire (get == true) or release (get == false) the power (avdd) and
+ * bus pull-up (vdd) regulators named in the board data, and program
+ * their load and voltage requirements.
+ *
+ * Load/voltage is only programmed for a regulator that was actually
+ * requested; the original code invoked regulator_set_load()/
+ * regulator_set_voltage() unconditionally, passing a NULL consumer
+ * whenever no regulator name was supplied.
+ *
+ * Returns 0 on success or a negative errno; on failure both regulator
+ * handles are released and cleared.
+ */
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_load(rmi4_data->pwr_reg,
+				20000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator current avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(rmi4_data->pwr_reg,
+				3000000,
+				3000000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_load(rmi4_data->bus_reg,
+				62000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator current vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		/* Error message fixed: this is the vdd (bus) rail, the
+		 * original text said "avdd" (copy-paste). */
+		retval = regulator_set_voltage(rmi4_data->bus_reg,
+				1800000,
+				1800000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+/*
+ * Enable (enable == true) or disable (enable == false) the bus pull-up
+ * and power regulators, tracking state in vdd_status/avdd_status so the
+ * operations are idempotent.  The teardown labels double as the disable
+ * path: when enable is false, control jumps straight to them with
+ * retval == 0.
+ *
+ * Returns 0 on success or the negative errno from regulator_enable().
+ */
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!enable) {
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	/* Bus pull-up (vdd) first, then the main power rail (avdd) */
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 0) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+		rmi4_data->vdd_status = 1;
+	}
+
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 0) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			goto disable_bus_reg;
+		}
+		rmi4_data->avdd_status = 1;
+		/* Allow the supply to settle per board data */
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 1) {
+		regulator_disable(rmi4_data->pwr_reg);
+		rmi4_data->avdd_status = 0;
+	}
+
+disable_bus_reg:
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 1) {
+		regulator_disable(rmi4_data->bus_reg);
+		rmi4_data->vdd_status = 0;
+	}
+
+exit:
+	return retval;
+}
+
+/*
+ * Release all touch state on the input devices: lift every multi-touch
+ * slot (type B protocol), clear the touch/tool keys, and sync, for both
+ * the finger and (if enabled) stylus devices.  Serialized against the
+ * reporting path with rmi4_report_mutex.
+ *
+ * Always returns 0.
+ */
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	for (ii = 0; ii < rmi4_data->num_of_fingers; ii++) {
+		input_mt_slot(rmi4_data->input_dev, ii);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(rmi4_data->input_dev);
+#endif
+	input_sync(rmi4_data->input_dev);
+
+	if (rmi4_data->stylus_enable) {
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable) {
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_RUBBER, 0);
+		}
+		input_sync(rmi4_data->stylus_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
+/*
+ * Issue the F01 software reset command, wait out the board-specified
+ * reset delay, and reinitialize the bus interface if the hardware layer
+ * provides a hook for it.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char reset_cmd = 0x01;
+	int retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&reset_cmd,
+			sizeof(reset_cmd));
+	if (retval < 0)
+		return retval;
+
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (rmi4_data->hw_if->ui_hw_init) {
+		retval = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Report 1 when any input-device parameter recorded at registration
+ * time no longer matches the current device state, meaning the input
+ * device must be torn down and rebuilt; report 0 otherwise.
+ */
+static int synaptics_rmi4_do_rebuild(struct synaptics_rmi4_data *rmi4_data)
+{
+	const struct synaptics_rmi4_input_settings *saved =
+			&(rmi4_data->input_settings);
+
+	return (saved->num_of_fingers != rmi4_data->num_of_fingers) ||
+			(saved->valid_button_count !=
+					rmi4_data->valid_button_count) ||
+			(saved->max_touch_width !=
+					rmi4_data->max_touch_width) ||
+			(saved->sensor_max_x != rmi4_data->sensor_max_x) ||
+			(saved->sensor_max_y != rmi4_data->sensor_max_y) ||
+			(saved->force_min != rmi4_data->force_min) ||
+			(saved->force_max != rmi4_data->force_max) ||
+			(saved->stylus_enable != rmi4_data->stylus_enable) ||
+			(saved->eraser_enable != rmi4_data->eraser_enable);
+}
+
+/*
+ * Delayed-work handler that tears down and recreates the input device
+ * after a reset changed its parameters: notifies expansion functions of
+ * removal, drops sysfs attributes, unregisters the device(s), rebuilds
+ * them via synaptics_rmi4_set_input_dev(), then re-adds attributes and
+ * re-runs expansion init.  Runs with interrupts disabled under both the
+ * reset mutex and the expansion-data mutex.
+ */
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	/* Give expansion functions a chance to detach from the old device */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	/* Full teardown of input devices and function handler list */
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+exit:
+	/* Interrupts are re-enabled even when the rebuild failed */
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+}
+
+/*
+ * Light-weight reinitialization after resume or similar: releases any
+ * stale touch state, restores the F12 report enables, re-enables the
+ * device interrupts, notifies expansion functions, and marks the device
+ * configured.  Serialized with resets via rmi4_reset_mutex.
+ *
+ * Returns 0 on success or the negative errno from the interrupt enable.
+ */
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	/* Only F12 needs its report enables reprogrammed here */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
+				synaptics_rmi4_f12_set_enables(rmi4_data, 0);
+				break;
+			}
+		}
+	}
+
+	retval = synaptics_rmi4_int_enable(rmi4_data, true);
+	if (retval < 0)
+		goto exit;
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reinit != NULL)
+				exp_fhandler->exp_fn->reinit(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+	return retval;
+}
+
+/*
+ * Full device reset: software-reset the controller, drop all touch
+ * state and function handlers, re-run device discovery, and notify
+ * expansion functions.  When rebuild is requested and the rediscovered
+ * parameters differ from the registered input device, the rebuild work
+ * is scheduled.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	/* Rediscover the PDT; the reset may have changed the firmware */
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	/* Interrupts are re-enabled even on failure */
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	/* Rebuild runs from a workqueue outside the reset mutex */
+	if (rebuild && synaptics_rmi4_do_rebuild(rmi4_data)) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+	}
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+/*
+ * Deferred reset worker: polls until the framebuffer reports ready,
+ * then issues a device reset under the expansion-init mutex.  The
+ * timeout error is reported only when the wait actually times out; the
+ * original code also printed the timeout message on every successful
+ * exit because the success path fell through the err label.
+ */
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval = 0;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+#endif
+
+/*
+ * Rewrite the F01 device-control sleep bits: put the sensor to sleep
+ * when enable is true, otherwise restore normal operation together with
+ * the configured no-sleep setting.  On success the cached sensor_sleep
+ * flag is updated.
+ *
+ * Returns the (non-negative) result of the register write on success,
+ * or a negative errno.
+ */
+static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	unsigned char ctrl;
+	int retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Clear the three sleep-mode bits, then set the requested mode */
+	ctrl &= ~MASK_3BIT;
+	if (enable)
+		ctrl |= SENSOR_SLEEP;
+	else
+		ctrl |= rmi4_data->no_sleep_setting | NORMAL_OPERATION;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return retval;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return retval;
+}
+
+/*
+ * Delayed-work handler that applies pending expansion-function changes:
+ * entries flagged insert get their init() called, entries flagged
+ * remove get remove() called and are unlinked and freed.  Runs under
+ * the expansion-init, reset, and expansion-data mutexes (in that order).
+ */
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		/* _safe variant: removal entries are deleted while iterating */
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+
+/*
+ * Register (insert == true) or mark for removal (insert == false) an
+ * expansion function.  The actual init/remove callbacks are deferred to
+ * synaptics_rmi4_exp_fn_work(), which is scheduled here once the driver
+ * has set exp_data.queue_work.  Safe to call before the driver probes:
+ * the shared exp_data state is lazily initialized on first use.
+ */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		/* Match on function type; flag for deferred removal */
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+ int retval;
+ unsigned char attr_count;
+ struct synaptics_rmi4_data *rmi4_data;
+ const struct synaptics_dsx_hw_interface *hw_if;
+ const struct synaptics_dsx_board_data *bdata;
+
+ hw_if = pdev->dev.platform_data;
+ if (!hw_if) {
+ dev_err(&pdev->dev,
+ "%s: No hardware interface found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ bdata = hw_if->board_data;
+ if (!bdata) {
+ dev_err(&pdev->dev,
+ "%s: No board data found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+ if (!rmi4_data) {
+ dev_err(&pdev->dev,
+ "%s: Failed to alloc mem for rmi4_data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ rmi4_data->pdev = pdev;
+ rmi4_data->current_page = MASK_8BIT;
+ rmi4_data->hw_if = hw_if;
+ rmi4_data->suspend = false;
+ rmi4_data->irq_enabled = false;
+ rmi4_data->fingers_on_2d = false;
+
+ rmi4_data->reset_device = synaptics_rmi4_reset_device;
+ rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+ rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+ rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+ mutex_init(&(rmi4_data->rmi4_reset_mutex));
+ mutex_init(&(rmi4_data->rmi4_report_mutex));
+ mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+ mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+ mutex_init(&(rmi4_data->rmi4_irq_enable_mutex));
+
+ platform_set_drvdata(pdev, rmi4_data);
+
+ vir_button_map = bdata->vir_button_map;
+
+ retval = synaptics_rmi4_get_reg(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to get regulators\n",
+ __func__);
+ goto err_get_reg;
+ }
+
+ retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to enable regulators\n",
+ __func__);
+ goto err_enable_reg;
+ }
+
+ retval = synaptics_rmi4_set_gpio(rmi4_data);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to set up GPIO's\n",
+ __func__);
+ goto err_set_gpio;
+ }
+
+ retval = synaptics_dsx_pinctrl_init(rmi4_data);
+ if (!retval && rmi4_data->ts_pinctrl) {
+ /*
+ * Pinctrl handle is optional. If pinctrl handle is found
+ * let pins to be configured in active state. If not
+ * found continue further without error.
+ */
+ retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_active);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to select %s pinstate %d\n",
+ __func__, PINCTRL_STATE_ACTIVE, retval);
+ }
+ }
+
+ if (hw_if->ui_hw_init) {
+ retval = hw_if->ui_hw_init(rmi4_data);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to initialize hardware interface\n",
+ __func__);
+ goto err_ui_hw_init;
+ }
+ }
+
+ retval = synaptics_rmi4_set_input_dev(rmi4_data);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to set up input device\n",
+ __func__);
+ goto err_set_input_dev;
+ }
+
+#ifdef CONFIG_FB
+ rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_dsi_panel_notifier_cb;
+ retval = msm_drm_register_client(&rmi4_data->fb_notifier);
+ if (retval < 0) {
+
+
+ dev_err(&pdev->dev,
+ "%s: Failed to register fb notifier client\n",
+ __func__);
+ }
+#endif
+
+#ifdef USE_EARLYSUSPEND
+ rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+ rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+ register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+ if (!exp_data.initialized) {
+ mutex_init(&exp_data.mutex);
+ INIT_LIST_HEAD(&exp_data.list);
+ exp_data.initialized = true;
+ }
+
+ rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+ retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to enable attention interrupt\n",
+ __func__);
+ goto err_enable_irq;
+ }
+
+ if (vir_button_map->nbuttons) {
+ rmi4_data->board_prop_dir = kobject_create_and_add(
+ "board_properties", NULL);
+ if (!rmi4_data->board_prop_dir) {
+ dev_err(&pdev->dev,
+ "%s: Failed to create board_properties directory\n",
+ __func__);
+ goto err_virtual_buttons;
+ } else {
+ retval = sysfs_create_file(rmi4_data->board_prop_dir,
+ &virtual_key_map_attr.attr);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to create virtual key map file\n",
+ __func__);
+ goto err_virtual_buttons;
+ }
+ }
+ }
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ goto err_sysfs;
+ }
+ }
+
+#ifdef USE_DATA_SERVER
+ memset(&interrupt_signal, 0, sizeof(interrupt_signal));
+ interrupt_signal.si_signo = SIGIO;
+ interrupt_signal.si_code = SI_USER;
+#endif
+
+ rmi4_data->rb_workqueue =
+ create_singlethread_workqueue("dsx_rebuild_workqueue");
+ INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+ exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+ INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+ exp_data.rmi4_data = rmi4_data;
+ exp_data.queue_work = true;
+ queue_delayed_work(exp_data.workqueue,
+ &exp_data.work,
+ 0);
+
+#ifdef FB_READY_RESET
+ rmi4_data->reset_workqueue =
+ create_singlethread_workqueue("dsx_reset_workqueue");
+ INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+ queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+
+ return retval;
+
+err_sysfs:
+ for (attr_count--; attr_count >= 0; attr_count--) {
+ sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ }
+
+err_virtual_buttons:
+ if (rmi4_data->board_prop_dir) {
+ sysfs_remove_file(rmi4_data->board_prop_dir,
+ &virtual_key_map_attr.attr);
+ kobject_put(rmi4_data->board_prop_dir);
+ }
+
+ synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef CONFIG_FB
+ msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+ unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+ synaptics_rmi4_empty_fn_list(rmi4_data);
+ input_unregister_device(rmi4_data->input_dev);
+ rmi4_data->input_dev = NULL;
+ if (rmi4_data->stylus_enable) {
+ input_unregister_device(rmi4_data->stylus_dev);
+ rmi4_data->stylus_dev = NULL;
+ }
+
+err_set_input_dev:
+ synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+ if (bdata->reset_gpio >= 0)
+ synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+ if (bdata->power_gpio >= 0)
+ synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_ui_hw_init:
+err_set_gpio:
+ synaptics_rmi4_enable_reg(rmi4_data, false);
+
+ if (rmi4_data->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+ devm_pinctrl_put(rmi4_data->ts_pinctrl);
+ rmi4_data->ts_pinctrl = NULL;
+ } else {
+ retval = pinctrl_select_state(
+ rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_release);
+ if (retval)
+ dev_err(&pdev->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ }
+ }
+
+err_enable_reg:
+ synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+ kfree(rmi4_data);
+
+ return retval;
+}
+
+/*
+ * synaptics_rmi4_remove - platform driver removal callback
+ *
+ * Tears down everything set up by the probe path, in roughly reverse
+ * order: deferred-work machinery, sysfs attributes, the attention
+ * interrupt, display/early-suspend notifiers, input devices, GPIOs,
+ * pinctrl state, and finally regulators and the instance data itself.
+ * Always returns 0.
+ */
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	/* Stop the deferred reset work before freeing anything it touches. */
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	/* Quiesce the expansion-function and rebuild workqueues. */
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	/* board_prop_dir is only created when virtual buttons are present. */
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+#ifdef CONFIG_FB
+	msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	/* reset/power GPIOs are optional; negative means "not used". */
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+	if (rmi4_data->ts_pinctrl) {
+		/*
+		 * Prefer the dedicated "release" pin state; if it was not
+		 * provided in DT, just drop the pinctrl handle.
+		 */
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			pinctrl_select_state(
+				rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_release);
+		}
+	}
+
+	/* Disable, then release, the regulators (reverse of probe). */
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+#ifdef CONFIG_FB
+/*
+ * synaptics_rmi4_dsi_panel_notifier_cb - MSM DRM panel blank notifier
+ *
+ * Suspends the touch controller when the primary panel (id 0) powers
+ * down and resumes it on unblank, tracking readiness in @fb_ready.
+ * Always returns 0 (notifier chain continues).
+ *
+ * Cleanup vs. the original: the early-return guard already proves
+ * @evdata is non-NULL and container_of() cannot yield NULL, so the
+ * redundant re-checks and nesting are replaced with guard clauses.
+ */
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	int transition;
+	struct msm_drm_notifier *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+
+	/* Only react to events carrying data for the primary display. */
+	if (!evdata || evdata->id != 0 || !evdata->data)
+		return 0;
+
+	if (event != MSM_DRM_EVENT_BLANK)
+		return 0;
+
+	transition = *(int *)evdata->data;
+	if (transition == MSM_DRM_BLANK_POWERDOWN) {
+		synaptics_rmi4_suspend(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = false;
+	} else if (transition == MSM_DRM_BLANK_UNBLANK) {
+		synaptics_rmi4_resume(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = true;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+/*
+ * synaptics_rmi4_early_suspend - early-suspend (screen off) handler
+ *
+ * If wakeup gestures are enabled, clears NoSleep (when set by default),
+ * arms gesture detection and makes the IRQ wake-capable; otherwise the
+ * interrupt is disabled and the sensor put to sleep. Registered
+ * expansion modules get their early_suspend() hook afterwards.
+ *
+ * Bug fix: the original returned an undeclared variable `retval`,
+ * which does not compile when USE_EARLYSUSPEND is defined. This
+ * handler has no failure to report, so return 0 unconditionally.
+ */
+static int synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+	unsigned char device_ctrl;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		if (rmi4_data->no_sleep_setting) {
+			/* Clear NO_SLEEP so the gesture engine can doze. */
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+#ifdef SYNA_TDDI
+	if (rmi4_data->no_sleep_setting) {
+		synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+		device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+		synaptics_rmi4_reg_write(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+	}
+	synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+	usleep(TDDI_LPWG_WAIT_US);
+#endif
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_late_resume - early-suspend (screen on) handler
+ *
+ * Undoes synaptics_rmi4_early_suspend(): disarms the wake IRQ when
+ * gestures were used, otherwise wakes the sensor and re-enables the
+ * interrupt, then runs the expansion modules' late_resume() hooks.
+ *
+ * Bug fix: the original `return retval;` statements referenced a
+ * variable declared only under FB_READY_RESET (a compile error
+ * otherwise) and potentially read it uninitialized. `retval` is now
+ * used solely for the optional reset's status and the function
+ * returns 0 unconditionally.
+ */
+static int synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Force a page-select refresh on the first register access. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+#endif
+
+/*
+ * synaptics_rmi4_suspend - suspend handler (PM ops / DRM blank path)
+ *
+ * With wakeup gestures enabled, the sensor is left running in
+ * low-power gesture mode with a wake-capable IRQ. Otherwise the IRQ is
+ * disabled, the sensor put to sleep, pins moved to the suspend state
+ * and the regulators turned off. Expansion modules' suspend() hooks
+ * run in both cases. Always returns 0.
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned char device_ctrl;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		if (rmi4_data->no_sleep_setting) {
+			/* Clear NO_SLEEP so the gesture engine can doze. */
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Guard against double-suspend (e.g. blank event after PM). */
+	if (!rmi4_data->suspend) {
+#ifdef SYNA_TDDI
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		usleep(TDDI_LPWG_WAIT_US);
+#endif
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+	if (rmi4_data->ts_pinctrl)
+		pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_suspend);
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_resume - resume handler (PM ops / DRM unblank path)
+ *
+ * Reverses synaptics_rmi4_suspend(): disarms gesture wake, or powers
+ * the regulators and pins back up, wakes the sensor and re-enables the
+ * IRQ. Optionally issues a device reset (FB_READY_RESET), then runs
+ * the expansion modules' resume() hooks. Always returns 0.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		goto exit;
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, true);
+
+	if (rmi4_data->ts_pinctrl)
+		pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+
+	/* Force a page-select refresh on the first register access. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+exit:
+#ifdef FB_READY_RESET
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->resume != NULL)
+				exp_fhandler->exp_fn->resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * When CONFIG_FB is set, suspend/resume are driven by the DRM panel
+ * blank notifier instead, so the PM ops are intentionally left empty
+ * to avoid suspending the device twice.
+ */
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+#ifndef CONFIG_FB
+	.suspend = synaptics_rmi4_suspend,
+	.resume = synaptics_rmi4_resume,
+#endif
+};
+#endif
+
+/* Platform driver binding for the Synaptics DSX touch controller. */
+static struct platform_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe = synaptics_rmi4_probe,
+	.remove = synaptics_rmi4_remove,
+};
+
+/*
+ * synaptics_rmi4_init - module entry point
+ *
+ * Initializes the DSX bus layer, then registers the platform driver.
+ *
+ * Bug fix: the original leaked the bus initialization when
+ * platform_driver_register() failed; unwind it so module load failure
+ * leaves no residue.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+	int retval;
+
+	retval = synaptics_rmi4_bus_init();
+	if (retval)
+		return retval;
+
+	retval = platform_driver_register(&synaptics_rmi4_driver);
+	if (retval)
+		synaptics_rmi4_bus_exit();
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_exit - module exit point
+ *
+ * Unregisters the platform driver and tears down the bus layer, in
+ * reverse order of synaptics_rmi4_init().
+ */
+static void __exit synaptics_rmi4_exit(void)
+{
+	platform_driver_unregister(&synaptics_rmi4_driver);
+
+	synaptics_rmi4_bus_exit();
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
new file mode 100644
index 0000000..3e0c0db
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -0,0 +1,535 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2070
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+/*
+*#define F51_DISCRETE_FORCE
+*#ifdef F51_DISCRETE_FORCE
+*#define FORCE_LEVEL_ADDR 0x0419
+*#define FORCE_LEVEL_MAX 255
+*#define CAL_DATA_SIZE 144
+*#endif
+*#define SYNA_TDDI
+*/
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F21 (0x21)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_ts_release"
+
+/*
+ * enum exp_fn - identifiers for loadable expansion-function modules
+ * (rmidev, firmware updater, test reporting, etc.); RMI_LAST marks
+ * the end of the list.
+ */
+enum exp_fn {
+	RMI_DEV = 0,
+	RMI_FW_UPDATER,
+	RMI_TEST_REPORTING,
+	RMI_PROXIMITY,
+	RMI_ACTIVE_PEN,
+	RMI_GESTURE,
+	RMI_VIDEO,
+	RMI_DEBUG,
+	RMI_LAST,
+};
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+ union {
+ struct {
+ unsigned char query_base_addr;
+ unsigned char cmd_base_addr;
+ unsigned char ctrl_base_addr;
+ unsigned char data_base_addr;
+ unsigned char intr_src_count:3;
+ unsigned char reserved_1:2;
+ unsigned char fn_version:2;
+ unsigned char reserved_2:1;
+ unsigned char fn_number;
+ } __packed;
+ unsigned char data[6];
+ };
+};
+
+/*
+ * synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+ unsigned short query_base;
+ unsigned short cmd_base;
+ unsigned short ctrl_base;
+ unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+ unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data29_offset: offset to F12_2D_DATA29 register
+ * @data29_size: size of F12_2D_DATA29 register
+ * @data29_data: buffer for reading F12_2D_DATA29 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+ unsigned char data1_offset;
+ unsigned char data4_offset;
+ unsigned char data15_offset;
+ unsigned char data15_size;
+ unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+ unsigned char data29_offset;
+ unsigned char data29_size;
+ unsigned char data29_data[F12_FINGERS_TO_SUPPORT * 2];
+ unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+ unsigned char fn_number;
+ unsigned char num_of_data_sources;
+ unsigned char num_of_data_points;
+ unsigned char intr_reg_num;
+ unsigned char intr_mask;
+ struct synaptics_rmi4_fn_full_addr full_addr;
+ struct list_head link;
+ int data_size;
+ void *data;
+ void *extra;
+};
+
+/*
+ * struct synaptics_rmi4_input_settings - current input settings
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @valid_button_count: number of valid 0D buttons
+ * @max_touch_width: maximum touch width
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ */
+struct synaptics_rmi4_input_settings {
+ unsigned char num_of_fingers;
+ unsigned char valid_button_count;
+ unsigned char max_touch_width;
+ int sensor_max_x;
+ int sensor_max_y;
+ int force_min;
+ int force_max;
+ bool stylus_enable;
+ bool eraser_enable;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+ unsigned int version_major;
+ unsigned int version_minor;
+ unsigned char manufacturer_id;
+ unsigned char product_props;
+ unsigned char product_info[PRODUCT_INFO_SIZE];
+ unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+ unsigned char build_id[BUILD_ID_SIZE];
+ struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rmi4_irq_enable_mutex: mutex for enabling/disabling interrupt
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @valid_button_count: number of valid 0D buttons
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @f51_query_base_addr: query base address for f$51
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @set_wakeup_gesture: location of set wakeup gesture
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
+struct synaptics_rmi4_data {
+ struct platform_device *pdev;
+ struct input_dev *input_dev;
+ struct input_dev *stylus_dev;
+ const struct synaptics_dsx_hw_interface *hw_if;
+ struct synaptics_rmi4_device_info rmi4_mod_info;
+ struct synaptics_rmi4_input_settings input_settings;
+ struct kobject *board_prop_dir;
+ struct regulator *pwr_reg;
+ struct regulator *bus_reg;
+ struct mutex rmi4_reset_mutex;
+ struct mutex rmi4_report_mutex;
+ struct mutex rmi4_io_ctrl_mutex;
+ struct mutex rmi4_exp_init_mutex;
+ struct mutex rmi4_irq_enable_mutex;
+ struct delayed_work rb_work;
+ struct workqueue_struct *rb_workqueue;
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
+#ifdef CONFIG_FB
+ struct notifier_block fb_notifier;
+ struct work_struct reset_work;
+ struct workqueue_struct *reset_workqueue;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ unsigned char current_page;
+ unsigned char button_0d_enabled;
+ unsigned char num_of_tx;
+ unsigned char num_of_rx;
+ unsigned char num_of_fingers;
+ unsigned char max_touch_width;
+ unsigned char valid_button_count;
+ unsigned char report_enable;
+ unsigned char no_sleep_setting;
+ unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
+ unsigned char intr_mask[MAX_INTR_REGISTERS];
+ unsigned char *button_txrx_mapping;
+ unsigned short num_of_intr_regs;
+ unsigned short f01_query_base_addr;
+ unsigned short f01_cmd_base_addr;
+ unsigned short f01_ctrl_base_addr;
+ unsigned short f01_data_base_addr;
+#ifdef F51_DISCRETE_FORCE
+ unsigned short f51_query_base_addr;
+#endif
+ unsigned int firmware_id;
+ int irq;
+ int sensor_max_x;
+ int sensor_max_y;
+ int force_min;
+ int force_max;
+ int set_wakeup_gesture;
+ int avdd_status;
+ int vdd_status;
+ bool flash_prog_mode;
+ bool irq_enabled;
+ bool fingers_on_2d;
+ bool suspend;
+ bool sensor_sleep;
+ bool stay_awake;
+ bool fb_ready;
+ bool f11_wakeup_gesture;
+ bool f12_wakeup_gesture;
+ bool enable_wakeup_gesture;
+ bool wedge_sensor;
+ bool report_pressure;
+ bool stylus_enable;
+ bool eraser_enable;
+ bool external_afe_buttons;
+ int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
+ bool rebuild);
+ int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
+ bool attn_only);
+ int (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
+ bool enable);
+ void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler);
+};
+
+/*
+ * struct synaptics_dsx_bus_access - bus (I2C/SPI/RMI) register access ops
+ * @type: bus type identifier
+ * @read: read @length bytes from register @addr into @data
+ * @write: write @length bytes from @data to register @addr
+ * Both return a negative errno on failure.
+ */
+struct synaptics_dsx_bus_access {
+	unsigned char type;
+	int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+};
+
+/*
+ * struct synaptics_dsx_hw_interface - hardware interface description
+ * @board_data: platform/DT board configuration
+ * @bus_access: register access operations for the underlying bus
+ * @bl_hw_init: optional bootloader-mode hardware init hook
+ * @ui_hw_init: optional UI-mode hardware init hook
+ */
+struct synaptics_dsx_hw_interface {
+	struct synaptics_dsx_board_data *board_data;
+	const struct synaptics_dsx_bus_access *bus_access;
+	int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+	int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+/*
+ * struct synaptics_rmi4_exp_fn - expansion-function module callbacks
+ * @fn_type: module identifier (enum exp_fn)
+ * @init / @remove: called on module registration / removal
+ * @reset / @reinit: called on device reset / reinitialization
+ * @early_suspend / @suspend / @resume / @late_resume: PM hooks
+ * @attn: called from the attention interrupt with the interrupt mask
+ * All hooks except @init are optional (may be NULL).
+ */
+struct synaptics_rmi4_exp_fn {
+	enum exp_fn fn_type;
+	int (*init)(struct synaptics_rmi4_data *rmi4_data);
+	void (*remove)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reset)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
+	void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*attn)(struct synaptics_rmi4_data *rmi4_data,
+			unsigned char intr_mask);
+};
+
+int synaptics_rmi4_bus_init(void);
+
+void synaptics_rmi4_bus_exit(void);
+
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+ bool insert);
+
+int synaptics_fw_updater(const unsigned char *fw_data);
+
+/*
+ * synaptics_rmi4_reg_read - read @len bytes from RMI register @addr
+ * into @data via the configured bus; returns the bus op's result
+ * (negative errno on failure).
+ */
+static inline int synaptics_rmi4_reg_read(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
+}
+
+/*
+ * synaptics_rmi4_reg_write - write @len bytes from @data to RMI
+ * register @addr via the configured bus; returns the bus op's result
+ * (negative errno on failure).
+ */
+static inline int synaptics_rmi4_reg_write(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
+}
+
+/*
+ * synaptics_rmi4_show_error - stub "show" for write-only sysfs
+ * attributes; warns and returns -EPERM.
+ */
+static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/*
+ * synaptics_rmi4_store_error - stub "store" for read-only sysfs
+ * attributes; warns and returns -EPERM.
+ */
+static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/*
+ * secure_memcpy - bounds-checked memcpy
+ *
+ * Copies @count bytes from @src to @dest only if both pointers are
+ * non-NULL and @count does not exceed either @dest_size or @src_size.
+ * Returns 0 on success, -EINVAL on any validation failure (nothing is
+ * copied). Regions must not overlap (plain memcpy semantics).
+ */
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+		const unsigned char *src, unsigned int src_size,
+		unsigned int count)
+{
+	if (dest == NULL || src == NULL)
+		return -EINVAL;
+
+	if (count > dest_size || count > src_size)
+		return -EINVAL;
+
+	memcpy((void *)dest, (const void *)src, count);
+
+	return 0;
+}
+
+/*
+ * batohs - byte array to host short: assembles a 16-bit value from a
+ * 2-byte little-endian buffer (src[0] = LSB, src[1] = MSB).
+ */
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+	*dest = src[1] * 0x100 + src[0];
+}
+
+/*
+ * hstoba - host short to byte array: splits a 16-bit value into a
+ * 2-byte little-endian buffer (dest[0] = LSB, dest[1] = MSB);
+ * inverse of batohs().
+ */
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+	dest[0] = src % 0x100;
+	dest[1] = src / 0x100;
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
new file mode 100644
index 0000000..7f62e01
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -0,0 +1,5809 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define FW_IHEX_NAME "synaptics/startup_fw_update.bin"
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+/*
+*#define DO_STARTUP_FW_UPDATE
+*/
+/*
+*#ifdef DO_STARTUP_FW_UPDATE
+*#ifdef CONFIG_FB
+*#define WAIT_FOR_FB_READY
+*#define FB_READY_WAIT_MS 100
+*#define FB_READY_TIMEOUT_S 30
+*#endif
+*#endif
+*/
+/*
+*#define MAX_WRITE_SIZE 4096
+*/
+
+#define ENABLE_SYS_REFLASH false
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define MAX_UTILITY_PARAMS 20
+
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_FLASH_STATUS_OFFSET 5
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 5000
+#define F35_RESET_WAIT_MS 250
+
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+#define READ_CONFIG_WAIT_MS 20
+
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef SYNA_TDDI
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+#endif
+
+enum f34_version {
+ F34_V0 = 0,
+ F34_V1,
+ F34_V2,
+};
+
+enum bl_version {
+ BL_V5 = 5,
+ BL_V6 = 6,
+ BL_V7 = 7,
+ BL_V8 = 8,
+};
+
+enum flash_area {
+ NONE = 0,
+ UI_FIRMWARE,
+ UI_CONFIG,
+};
+
+enum update_mode {
+ NORMAL = 1,
+ FORCE = 2,
+ LOCKDOWN = 8,
+};
+
+enum config_area {
+ UI_CONFIG_AREA = 0,
+ PM_CONFIG_AREA,
+ BL_CONFIG_AREA,
+ DP_CONFIG_AREA,
+ FLASH_CONFIG_AREA,
+#ifdef SYNA_TDDI
+ TDDI_FORCE_CONFIG_AREA,
+ TDDI_LCM_DATA_AREA,
+ TDDI_OEM_DATA_AREA,
+#endif
+ UPP_AREA,
+};
+
+enum v7_status {
+ SUCCESS = 0x00,
+ DEVICE_NOT_IN_BOOTLOADER_MODE,
+ INVALID_PARTITION,
+ INVALID_COMMAND,
+ INVALID_BLOCK_OFFSET,
+ INVALID_TRANSFER,
+ NOT_ERASED,
+ FLASH_PROGRAMMING_KEY_INCORRECT,
+ BAD_PARTITION_TABLE,
+ CHECKSUM_FAILED,
+ FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+enum v7_partition_id {
+ BOOTLOADER_PARTITION = 0x01,
+ DEVICE_CONFIG_PARTITION,
+ FLASH_CONFIG_PARTITION,
+ MANUFACTURING_BLOCK_PARTITION,
+ GUEST_SERIALIZATION_PARTITION,
+ GLOBAL_PARAMETERS_PARTITION,
+ CORE_CODE_PARTITION,
+ CORE_CONFIG_PARTITION,
+ GUEST_CODE_PARTITION,
+ DISPLAY_CONFIG_PARTITION,
+ EXTERNAL_TOUCH_AFE_CONFIG_PARTITION,
+ UTILITY_PARAMETER_PARTITION,
+};
+
+enum v7_flash_command {
+ CMD_V7_IDLE = 0x00,
+ CMD_V7_ENTER_BL,
+ CMD_V7_READ,
+ CMD_V7_WRITE,
+ CMD_V7_ERASE,
+ CMD_V7_ERASE_AP,
+ CMD_V7_SENSOR_ID,
+};
+
+enum v5v6_flash_command {
+ CMD_V5V6_IDLE = 0x0,
+ CMD_V5V6_WRITE_FW = 0x2,
+ CMD_V5V6_ERASE_ALL = 0x3,
+ CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+ CMD_V5V6_READ_CONFIG = 0x5,
+ CMD_V5V6_WRITE_CONFIG = 0x6,
+ CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+ CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+ CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+ CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+ CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+ CMD_V5V6_ERASE_CHIP = 0x0d,
+ CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+#ifdef SYNA_TDDI
+ CMD_V5V6_ERASE_FORCE_CONFIG = 0x11,
+ CMD_V5V6_READ_FORCE_CONFIG = 0x12,
+ CMD_V5V6_WRITE_FORCE_CONFIG = 0x13,
+ CMD_V5V6_ERASE_LOCKDOWN_DATA = 0x1a,
+ CMD_V5V6_READ_LOCKDOWN_DATA = 0x1b,
+ CMD_V5V6_WRITE_LOCKDOWN_DATA = 0x1c,
+ CMD_V5V6_ERASE_LCM_DATA = 0x1d,
+ CMD_V5V6_ERASE_OEM_DATA = 0x1e,
+#endif
+};
+
+enum flash_command {
+ CMD_IDLE = 0,
+ CMD_WRITE_FW,
+ CMD_WRITE_CONFIG,
+ CMD_WRITE_LOCKDOWN,
+ CMD_WRITE_GUEST_CODE,
+ CMD_WRITE_BOOTLOADER,
+ CMD_WRITE_UTILITY_PARAM,
+ CMD_READ_CONFIG,
+ CMD_ERASE_ALL,
+ CMD_ERASE_UI_FIRMWARE,
+ CMD_ERASE_UI_CONFIG,
+ CMD_ERASE_BL_CONFIG,
+ CMD_ERASE_DISP_CONFIG,
+ CMD_ERASE_FLASH_CONFIG,
+ CMD_ERASE_GUEST_CODE,
+ CMD_ERASE_BOOTLOADER,
+ CMD_ERASE_UTILITY_PARAMETER,
+ CMD_ENABLE_FLASH_PROG,
+#ifdef SYNA_TDDI
+ CMD_ERASE_CHIP,
+ CMD_ERASE_FORCE_CONFIG,
+ CMD_READ_FORCE_CONFIG,
+ CMD_WRITE_FORCE_CONFIG,
+ CMD_ERASE_LOCKDOWN_DATA,
+ CMD_READ_LOCKDOWN_DATA,
+ CMD_WRITE_LOCKDOWN_DATA,
+ CMD_ERASE_LCM_DATA,
+ CMD_READ_LCM_DATA,
+ CMD_WRITE_LCM_DATA,
+ CMD_ERASE_OEM_DATA,
+ CMD_READ_OEM_DATA,
+ CMD_WRITE_OEM_DATA,
+#endif
+};
+
+enum f35_flash_command {
+ CMD_F35_IDLE = 0x0,
+ CMD_F35_RESERVED = 0x1,
+ CMD_F35_WRITE_CHUNK = 0x2,
+ CMD_F35_ERASE_ALL = 0x3,
+ CMD_F35_RESET = 0x10,
+};
+
+enum container_id {
+ TOP_LEVEL_CONTAINER = 0,
+ UI_CONTAINER,
+ UI_CONFIG_CONTAINER,
+ BL_CONTAINER,
+ BL_IMAGE_CONTAINER,
+ BL_CONFIG_CONTAINER,
+ BL_LOCKDOWN_INFO_CONTAINER,
+ PERMANENT_CONFIG_CONTAINER,
+ GUEST_CODE_CONTAINER,
+ BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+ UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+ RMI_SELF_DISCOVERY_CONTAINER,
+ RMI_PAGE_CONTENT_CONTAINER,
+ GENERAL_INFORMATION_CONTAINER,
+ DEVICE_CONFIG_CONTAINER,
+ FLASH_CONFIG_CONTAINER,
+ GUEST_SERIALIZATION_CONTAINER,
+ GLOBAL_PARAMETERS_CONTAINER,
+ CORE_CODE_CONTAINER,
+ CORE_CONFIG_CONTAINER,
+ DISPLAY_CONFIG_CONTAINER,
+ EXTERNAL_TOUCH_AFE_CONFIG_CONTAINER,
+ UTILITY_CONTAINER,
+ UTILITY_PARAMETER_CONTAINER,
+};
+
+enum utility_parameter_id {
+ UNUSED = 0,
+ FORCE_PARAMETER,
+ ANTI_BENDING_PARAMETER,
+};
+
+struct pdt_properties {
+ union {
+ struct {
+ unsigned char reserved_1:6;
+ unsigned char has_bsr:1;
+ unsigned char reserved_2:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
/*
 * One 8-byte entry of the V7 bootloader partition table as laid out in
 * flash.  Multi-byte fields are split into low/high bytes (little
 * endian); fwu_parse_partition_table() treats partition_length as a
 * block count and start_physical_address as the partition's physical
 * start address.
 */
struct partition_table {
	unsigned char partition_id:5;	/* one of enum v7_partition_id */
	unsigned char byte_0_reserved:3;
	unsigned char byte_1_reserved;
	unsigned char partition_length_7_0;
	unsigned char partition_length_15_8;
	unsigned char start_physical_address_7_0;
	unsigned char start_physical_address_15_8;
	unsigned char partition_properties_7_0;
	unsigned char partition_properties_15_8;
} __packed;
+
+struct f01_device_control {
+ union {
+ struct {
+ unsigned char sleep_mode:2;
+ unsigned char nosleep:1;
+ unsigned char reserved:2;
+ unsigned char charger_connected:1;
+ unsigned char report_rate:1;
+ unsigned char configured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_query_0 {
+ union {
+ struct {
+ unsigned char subpacket_1_size:3;
+ unsigned char has_config_id:1;
+ unsigned char f34_query0_b4:1;
+ unsigned char has_thqa:1;
+ unsigned char f34_query0_b6__7:2;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_query_1_7 {
+ union {
+ struct {
+ /* query 1 */
+ unsigned char bl_minor_revision;
+ unsigned char bl_major_revision;
+
+ /* query 2 */
+ unsigned char bl_fw_id_7_0;
+ unsigned char bl_fw_id_15_8;
+ unsigned char bl_fw_id_23_16;
+ unsigned char bl_fw_id_31_24;
+
+ /* query 3 */
+ unsigned char minimum_write_size;
+ unsigned char block_size_7_0;
+ unsigned char block_size_15_8;
+ unsigned char flash_page_size_7_0;
+ unsigned char flash_page_size_15_8;
+
+ /* query 4 */
+ unsigned char adjustable_partition_area_size_7_0;
+ unsigned char adjustable_partition_area_size_15_8;
+
+ /* query 5 */
+ unsigned char flash_config_length_7_0;
+ unsigned char flash_config_length_15_8;
+
+ /* query 6 */
+ unsigned char payload_length_7_0;
+ unsigned char payload_length_15_8;
+
+ /* query 7 */
+ unsigned char f34_query7_b0:1;
+ unsigned char has_bootloader:1;
+ unsigned char has_device_config:1;
+ unsigned char has_flash_config:1;
+ unsigned char has_manufacturing_block:1;
+ unsigned char has_guest_serialization:1;
+ unsigned char has_global_parameters:1;
+ unsigned char has_core_code:1;
+ unsigned char has_core_config:1;
+ unsigned char has_guest_code:1;
+ unsigned char has_display_config:1;
+ unsigned char f34_query7_b11__15:5;
+ unsigned char f34_query7_b16__23;
+ unsigned char f34_query7_b24__31;
+ } __packed;
+ unsigned char data[21];
+ };
+};
+
+struct f34_v7_data0 {
+ union {
+ struct {
+ unsigned char operation_status:5;
+ unsigned char device_cfg_status:2;
+ unsigned char bl_mode:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v7_data_1_5 {
+ union {
+ struct {
+ unsigned char partition_id:5;
+ unsigned char f34_data1_b5__7:3;
+ unsigned char block_offset_7_0;
+ unsigned char block_offset_15_8;
+ unsigned char transfer_length_7_0;
+ unsigned char transfer_length_15_8;
+ unsigned char command;
+ unsigned char payload_0;
+ unsigned char payload_1;
+ } __packed;
+ unsigned char data[8];
+ };
+};
+
+struct f34_v5v6_flash_properties {
+ union {
+ struct {
+ unsigned char reg_map:1;
+ unsigned char unlocked:1;
+ unsigned char has_config_id:1;
+ unsigned char has_pm_config:1;
+ unsigned char has_bl_config:1;
+ unsigned char has_disp_config:1;
+ unsigned char has_ctrl1:1;
+ unsigned char has_query4:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_v5v6_flash_properties_2 {
+ union {
+ struct {
+ unsigned char has_guest_code:1;
+ unsigned char f34_query4_b1:1;
+ unsigned char has_gesture_config:1;
+ unsigned char has_force_config:1;
+ unsigned char has_lockdown_data:1;
+ unsigned char has_lcm_data:1;
+ unsigned char has_oem_data:1;
+ unsigned char f34_query4_b7:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct register_offset {
+ unsigned char properties;
+ unsigned char properties_2;
+ unsigned char block_size;
+ unsigned char block_count;
+ unsigned char gc_block_count;
+ unsigned char flash_status;
+ unsigned char partition_id;
+ unsigned char block_number;
+ unsigned char transfer_length;
+ unsigned char flash_cmd;
+ unsigned char payload;
+};
+
+struct block_count {
+ unsigned short ui_firmware;
+ unsigned short ui_config;
+ unsigned short dp_config;
+ unsigned short pm_config;
+ unsigned short fl_config;
+ unsigned short bl_image;
+ unsigned short bl_config;
+ unsigned short utility_param;
+ unsigned short lockdown;
+ unsigned short guest_code;
+#ifdef SYNA_TDDI
+ unsigned short tddi_force_config;
+ unsigned short tddi_lockdown_data;
+ unsigned short tddi_lcm_data;
+ unsigned short tddi_oem_data;
+#endif
+ unsigned short total_count;
+};
+
+struct physical_address {
+ unsigned short ui_firmware;
+ unsigned short ui_config;
+ unsigned short dp_config;
+ unsigned short pm_config;
+ unsigned short fl_config;
+ unsigned short bl_image;
+ unsigned short bl_config;
+ unsigned short utility_param;
+ unsigned short lockdown;
+ unsigned short guest_code;
+};
+
+struct container_descriptor {
+ unsigned char content_checksum[4];
+ unsigned char container_id[2];
+ unsigned char minor_version;
+ unsigned char major_version;
+ unsigned char reserved_08;
+ unsigned char reserved_09;
+ unsigned char reserved_0a;
+ unsigned char reserved_0b;
+ unsigned char container_option_flags[4];
+ unsigned char content_options_length[4];
+ unsigned char content_options_address[4];
+ unsigned char content_length[4];
+ unsigned char content_address[4];
+};
+
+struct image_header_10 {
+ unsigned char checksum[4];
+ unsigned char reserved_04;
+ unsigned char reserved_05;
+ unsigned char minor_header_version;
+ unsigned char major_header_version;
+ unsigned char reserved_08;
+ unsigned char reserved_09;
+ unsigned char reserved_0a;
+ unsigned char reserved_0b;
+ unsigned char top_level_container_start_addr[4];
+};
+
+struct image_header_05_06 {
+ /* 0x00 - 0x0f */
+ unsigned char checksum[4];
+ unsigned char reserved_04;
+ unsigned char reserved_05;
+ unsigned char options_firmware_id:1;
+ unsigned char options_bootloader:1;
+ unsigned char options_guest_code:1;
+ unsigned char options_tddi:1;
+ unsigned char options_reserved:4;
+ unsigned char header_version;
+ unsigned char firmware_size[4];
+ unsigned char config_size[4];
+ /* 0x10 - 0x1f */
+ unsigned char product_id[PRODUCT_ID_SIZE];
+ unsigned char package_id[2];
+ unsigned char package_id_revision[2];
+ unsigned char product_info[PRODUCT_INFO_SIZE];
+ /* 0x20 - 0x2f */
+ unsigned char bootloader_addr[4];
+ unsigned char bootloader_size[4];
+ unsigned char ui_addr[4];
+ unsigned char ui_size[4];
+ /* 0x30 - 0x3f */
+ unsigned char ds_id[16];
+ /* 0x40 - 0x4f */
+ union {
+ struct {
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+ unsigned char reserved_4a_4f[6];
+ };
+ struct {
+ unsigned char dsp_cfg_addr[4];
+ unsigned char dsp_cfg_size[4];
+ unsigned char reserved_48_4f[8];
+ };
+ };
+ /* 0x50 - 0x53 */
+ unsigned char firmware_id[4];
+};
+
+struct block_data {
+ unsigned int size;
+ const unsigned char *data;
+};
+
+struct image_metadata {
+ bool contains_firmware_id;
+ bool contains_bootloader;
+ bool contains_guest_code;
+ bool contains_disp_config;
+ bool contains_perm_config;
+ bool contains_flash_config;
+ bool contains_utility_param;
+ unsigned int firmware_id;
+ unsigned int checksum;
+ unsigned int bootloader_size;
+ unsigned int disp_config_offset;
+ unsigned char bl_version;
+ unsigned char product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char utility_param_id[MAX_UTILITY_PARAMS];
+ struct block_data bootloader;
+ struct block_data utility;
+ struct block_data ui_firmware;
+ struct block_data ui_config;
+ struct block_data dp_config;
+ struct block_data pm_config;
+ struct block_data fl_config;
+ struct block_data bl_image;
+ struct block_data bl_config;
+ struct block_data utility_param[MAX_UTILITY_PARAMS];
+ struct block_data lockdown;
+ struct block_data guest_code;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+};
+
+struct synaptics_rmi4_fwu_handle {
+ enum bl_version bl_version;
+ bool initialized;
+ bool in_bl_mode;
+ bool in_ub_mode;
+ bool bl_mode_device;
+ bool force_update;
+ bool do_lockdown;
+ bool has_guest_code;
+#ifdef SYNA_TDDI
+ bool has_force_config;
+ bool has_lockdown_data;
+ bool has_lcm_data;
+ bool has_oem_data;
+#endif
+ bool has_utility_param;
+ bool new_partition_table;
+ bool incompatible_partition_tables;
+ bool write_bootloader;
+ unsigned int data_pos;
+ unsigned char *ext_data_source;
+ unsigned char *read_config_buf;
+ unsigned char intr_mask;
+ unsigned char command;
+ unsigned char bootloader_id[2];
+ unsigned char config_id[32];
+ unsigned char flash_status;
+ unsigned char partitions;
+#ifdef F51_DISCRETE_FORCE
+ unsigned char *cal_data;
+ unsigned short cal_data_off;
+ unsigned short cal_data_size;
+ unsigned short cal_data_buf_size;
+ unsigned short cal_packet_data_size;
+#endif
+ unsigned short block_size;
+ unsigned short config_size;
+ unsigned short config_area;
+ unsigned short config_block_count;
+ unsigned short flash_config_length;
+ unsigned short payload_length;
+ unsigned short partition_table_bytes;
+ unsigned short read_config_buf_size;
+ const unsigned char *config_data;
+ const unsigned char *image;
+ unsigned char *image_name;
+ unsigned int image_size;
+ struct image_metadata img;
+ struct register_offset off;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+ struct f34_v5v6_flash_properties flash_properties;
+ struct synaptics_rmi4_fn_desc f34_fd;
+ struct synaptics_rmi4_fn_desc f35_fd;
+ struct synaptics_rmi4_data *rmi4_data;
+ struct workqueue_struct *fwu_workqueue;
+ struct work_struct fwu_work;
+};
+
/*
 * sysfs binary attribute "data" (mode 0664), backed by
 * fwu_sysfs_show_image() / fwu_sysfs_store_image().
 * .size of 0 leaves the attribute size unspecified.
 */
static struct bin_attribute dev_attr_data = {
	.attr = {
		.name = "data",
		.mode = 0664,
	},
	.size = 0,
	.read = fwu_sysfs_show_image,
	.write = fwu_sysfs_store_image,
};
+
+static struct device_attribute attrs[] = {
+ __ATTR(dorecovery, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_do_recovery_store),
+ __ATTR(doreflash, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_do_reflash_store),
+ __ATTR(writeconfig, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_write_config_store),
+ __ATTR(readconfig, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_read_config_store),
+ __ATTR(configarea, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_config_area_store),
+ __ATTR(imagename, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_image_name_store),
+ __ATTR(imagesize, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_image_size_store),
+ __ATTR(blocksize, 0444,
+ fwu_sysfs_block_size_show,
+ synaptics_rmi4_store_error),
+ __ATTR(fwblockcount, 0444,
+ fwu_sysfs_firmware_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(configblockcount, 0444,
+ fwu_sysfs_configuration_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(dispconfigblockcount, 0444,
+ fwu_sysfs_disp_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(permconfigblockcount, 0444,
+ fwu_sysfs_perm_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(blconfigblockcount, 0444,
+ fwu_sysfs_bl_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(uppblockcount, 0444,
+ fwu_sysfs_utility_parameter_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(guestcodeblockcount, 0444,
+ fwu_sysfs_guest_code_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(writeguestcode, 0220,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_write_guest_code_store),
+#ifdef SYNA_TDDI
+ __ATTR(lockdowncode, 0664,
+ fwu_sysfs_read_lockdown_code_show,
+ fwu_sysfs_write_lockdown_code_store),
+#endif
+};
+
/* Singleton firmware-updater state shared by this module. */
static struct synaptics_rmi4_fwu_handle *fwu;

/* NOTE(review): presumably completed on updater teardown — confirm
 * against the remove path.
 */
DECLARE_COMPLETION(fwu_remove_complete);

/* Named for the sysfs handlers; serializes their access to fwu. */
DEFINE_MUTEX(fwu_sysfs_mutex);
+
/*
 * Compute a 32-bit Fletcher-style checksum over len 16-bit words.
 *
 * Two 16-bit running sums start at 0xffff and are end-around-carry
 * folded after every word; the final value stored in *result is
 * (sum2 << 16) | sum1.  *result is preset to 0xffffffff, which is
 * also the value produced for len == 0.
 */
static void calculate_checksum(unsigned short *data, unsigned long len,
		unsigned long *result)
{
	unsigned long ii;
	unsigned long sum1 = 0xffff;
	unsigned long sum2 = 0xffff;

	*result = 0xffffffff;

	for (ii = 0; ii < len; ii++) {
		sum1 += data[ii];
		sum2 += sum1;
		sum1 = (sum1 & 0xffff) + (sum1 >> 16);
		sum2 = (sum2 & 0xffff) + (sum2 >> 16);
	}

	*result = (sum2 << 16) | sum1;
}
+
/* Serialize the low 32 bits of src into dest as 4 little-endian bytes. */
static void convert_to_little_endian(unsigned char *dest, unsigned long src)
{
	int ii;

	for (ii = 0; ii < 4; ii++)
		dest[ii] = (unsigned char)(src >> (8 * ii));
}
+
/* Deserialize 4 little-endian bytes at ptr into a host unsigned int. */
static unsigned int le_to_uint(const unsigned char *ptr)
{
	return (unsigned int)ptr[0] |
			((unsigned int)ptr[1] << 8) |
			((unsigned int)ptr[2] << 16) |
			((unsigned int)ptr[3] << 24);
}
+
#ifdef F51_DISCRETE_FORCE
/*
 * Discover the F51 force calibration data layout and size the local
 * fwu->cal_data buffer accordingly.
 *
 * Layout is read from the F51 query registers:
 *   - query 7..8: 16-bit offset (LSB first) of the cal data
 *   - query 0:    number of F51 query registers present
 *   - query 9:    packet register info, read only when >= 10 queries
 *                 exist; bit 0 (MASK_1BIT, defined elsewhere) flags
 *                 extra packet data, bits 7:1 give its size in 16-bit
 *                 words
 *
 * NOTE(review): cal_data is presumably used to save/restore
 * calibration around reflash — confirm against callers.
 *
 * Returns 0 on success or a negative errno from the register reads or
 * the allocation.
 */
static int fwu_f51_force_data_init(void)
{
	int retval;
	unsigned char query_count;
	unsigned char packet_info;
	unsigned char offset[2];
	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f51_query_base_addr + 7,
			offset,
			sizeof(offset));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to read force data offset\n",
				__func__);
		return retval;
	}

	fwu->cal_data_off = offset[0] | offset[1] << 8;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f51_query_base_addr,
			&query_count,
			sizeof(query_count));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to read number of F51 query registers\n",
				__func__);
		return retval;
	}

	if (query_count >= 10) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				rmi4_data->f51_query_base_addr + 9,
				&packet_info,
				sizeof(packet_info));
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read F51 packet register info\n",
					__func__);
			return retval;
		}

		if (packet_info & MASK_1BIT) {
			/* size field is in 16-bit words; x2 gives bytes */
			fwu->cal_packet_data_size = packet_info >> 1;
			fwu->cal_packet_data_size *= 2;
		} else {
			fwu->cal_packet_data_size = 0;
		}
	} else {
		fwu->cal_packet_data_size = 0;
	}

	/* Grow (never shrink) the cal data buffer to the required size. */
	fwu->cal_data_size = CAL_DATA_SIZE + fwu->cal_packet_data_size;
	if (fwu->cal_data_size > fwu->cal_data_buf_size) {
		kfree(fwu->cal_data);
		fwu->cal_data_buf_size = fwu->cal_data_size;
		fwu->cal_data = kmalloc(fwu->cal_data_buf_size, GFP_KERNEL);
		if (!fwu->cal_data) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to alloc mem for fwu->cal_data\n",
					__func__);
			fwu->cal_data_buf_size = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
#endif
+
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (count > fwu->read_config_buf_size) {
+ kfree(fwu->read_config_buf);
+ fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+ if (!fwu->read_config_buf) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for fwu->read_config_buf\n",
+ __func__);
+ fwu->read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+ fwu->read_config_buf_size = count;
+ }
+
+ return 0;
+}
+
+static void fwu_compare_partition_tables(void)
+{
+ fwu->incompatible_partition_tables = false;
+
+ if (fwu->phyaddr.bl_image != fwu->img.phyaddr.bl_image)
+ fwu->incompatible_partition_tables = true;
+ else if (fwu->phyaddr.lockdown != fwu->img.phyaddr.lockdown)
+ fwu->incompatible_partition_tables = true;
+ else if (fwu->phyaddr.bl_config != fwu->img.phyaddr.bl_config)
+ fwu->incompatible_partition_tables = true;
+ else if (fwu->phyaddr.utility_param != fwu->img.phyaddr.utility_param)
+ fwu->incompatible_partition_tables = true;
+
+ if (fwu->bl_version == BL_V7) {
+ if (fwu->phyaddr.fl_config != fwu->img.phyaddr.fl_config)
+ fwu->incompatible_partition_tables = true;
+ }
+
+ fwu->new_partition_table = false;
+
+ if (fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware)
+ fwu->new_partition_table = true;
+ else if (fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config)
+ fwu->new_partition_table = true;
+
+ if (fwu->flash_properties.has_disp_config) {
+ if (fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config)
+ fwu->new_partition_table = true;
+ }
+
+ if (fwu->has_guest_code) {
+ if (fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code)
+ fwu->new_partition_table = true;
+ }
+
+ return;
+}
+
/*
 * Decode a V7 partition table and record, per known partition id, its
 * length (in blocks) and start physical address into blkcount and
 * phyaddr.
 *
 * fwu->partitions entries of 8 bytes each are parsed; the first entry
 * starts at byte offset 2 (see struct partition_table for the field
 * layout).  Every recognized partition's length is also accumulated
 * into blkcount->total_count; unknown partition ids are ignored.
 */
static void fwu_parse_partition_table(const unsigned char *partition_table,
		struct block_count *blkcount, struct physical_address *phyaddr)
{
	unsigned char ii;
	unsigned char index;
	unsigned char offset;
	unsigned short partition_length;
	unsigned short physical_address;
	struct partition_table *ptable;
	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;

	for (ii = 0; ii < fwu->partitions; ii++) {
		/* entry ii lives at bytes [ii*8 + 2, ii*8 + 9] */
		index = ii * 8 + 2;
		ptable = (struct partition_table *)&partition_table[index];
		partition_length = ptable->partition_length_15_8 << 8 |
				ptable->partition_length_7_0;
		physical_address = ptable->start_physical_address_15_8 << 8 |
				ptable->start_physical_address_7_0;
		dev_dbg(rmi4_data->pdev->dev.parent,
				"%s: Partition entry %d:\n",
				__func__, ii);
		/* dump the raw 8-byte entry for debugging */
		for (offset = 0; offset < 8; offset++) {
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: 0x%02x\n",
					__func__,
					partition_table[index + offset]);
		}
		switch (ptable->partition_id) {
		case CORE_CODE_PARTITION:
			blkcount->ui_firmware = partition_length;
			phyaddr->ui_firmware = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Core code block count: %d\n",
					__func__, blkcount->ui_firmware);
			blkcount->total_count += partition_length;
			break;
		case CORE_CONFIG_PARTITION:
			blkcount->ui_config = partition_length;
			phyaddr->ui_config = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Core config block count: %d\n",
					__func__, blkcount->ui_config);
			blkcount->total_count += partition_length;
			break;
		case BOOTLOADER_PARTITION:
			blkcount->bl_image = partition_length;
			phyaddr->bl_image = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Bootloader block count: %d\n",
					__func__, blkcount->bl_image);
			blkcount->total_count += partition_length;
			break;
		case UTILITY_PARAMETER_PARTITION:
			blkcount->utility_param = partition_length;
			phyaddr->utility_param = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Utility parameter block count: %d\n",
					__func__, blkcount->utility_param);
			blkcount->total_count += partition_length;
			break;
		case DISPLAY_CONFIG_PARTITION:
			blkcount->dp_config = partition_length;
			phyaddr->dp_config = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Display config block count: %d\n",
					__func__, blkcount->dp_config);
			blkcount->total_count += partition_length;
			break;
		case FLASH_CONFIG_PARTITION:
			blkcount->fl_config = partition_length;
			phyaddr->fl_config = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Flash config block count: %d\n",
					__func__, blkcount->fl_config);
			blkcount->total_count += partition_length;
			break;
		case GUEST_CODE_PARTITION:
			blkcount->guest_code = partition_length;
			phyaddr->guest_code = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Guest code block count: %d\n",
					__func__, blkcount->guest_code);
			blkcount->total_count += partition_length;
			break;
		case GUEST_SERIALIZATION_PARTITION:
			blkcount->pm_config = partition_length;
			phyaddr->pm_config = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Guest serialization block count: %d\n",
					__func__, blkcount->pm_config);
			blkcount->total_count += partition_length;
			break;
		case GLOBAL_PARAMETERS_PARTITION:
			blkcount->bl_config = partition_length;
			phyaddr->bl_config = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Global parameters block count: %d\n",
					__func__, blkcount->bl_config);
			blkcount->total_count += partition_length;
			break;
		case DEVICE_CONFIG_PARTITION:
			blkcount->lockdown = partition_length;
			phyaddr->lockdown = physical_address;
			dev_dbg(rmi4_data->pdev->dev.parent,
					"%s: Device config block count: %d\n",
					__func__, blkcount->lockdown);
			blkcount->total_count += partition_length;
			break;
		};
	}

	return;
}
+
+static void fwu_parse_image_header_10_utility(const unsigned char *image)
+{
+ unsigned char ii;
+ unsigned char num_of_containers;
+ unsigned int addr;
+ unsigned int container_id;
+ unsigned int length;
+ const unsigned char *content;
+ struct container_descriptor *descriptor;
+
+ num_of_containers = fwu->img.utility.size / 4;
+
+ for (ii = 0; ii < num_of_containers; ii++) {
+ if (ii >= MAX_UTILITY_PARAMS)
+ continue;
+ addr = le_to_uint(fwu->img.utility.data + (ii * 4));
+ descriptor = (struct container_descriptor *)(image + addr);
+ container_id = descriptor->container_id[0] |
+ descriptor->container_id[1] << 8;
+ content = image + le_to_uint(descriptor->content_address);
+ length = le_to_uint(descriptor->content_length);
+ switch (container_id) {
+ case UTILITY_PARAMETER_CONTAINER:
+ fwu->img.utility_param[ii].data = content;
+ fwu->img.utility_param[ii].size = length;
+ fwu->img.utility_param_id[ii] = content[0];
+ break;
+ default:
+ break;
+ };
+ }
+
+ return;
+}
+
+/*
+ * Walk the bootloader area of a v10 image. The first 4 bytes are skipped
+ * (the list starts at index 1); each subsequent 4-byte little endian entry
+ * addresses a container whose data pointer and size are recorded in
+ * fwu->img according to its container ID.
+ */
+static void fwu_parse_image_header_10_bootloader(const unsigned char *image)
+{
+	unsigned char idx;
+	unsigned char container_cnt;
+	unsigned int descriptor_addr;
+	unsigned int id;
+	unsigned int content_len;
+	const unsigned char *payload;
+	struct container_descriptor *desc;
+
+	container_cnt = (fwu->img.bootloader.size - 4) / 4;
+
+	for (idx = 1; idx <= container_cnt; idx++) {
+		descriptor_addr = le_to_uint(fwu->img.bootloader.data +
+				(idx * 4));
+		desc = (struct container_descriptor *)(image +
+				descriptor_addr);
+		id = desc->container_id[0] | desc->container_id[1] << 8;
+		payload = image + le_to_uint(desc->content_address);
+		content_len = le_to_uint(desc->content_length);
+		if (id == BL_IMAGE_CONTAINER) {
+			fwu->img.bl_image.data = payload;
+			fwu->img.bl_image.size = content_len;
+		} else if (id == BL_CONFIG_CONTAINER ||
+				id == GLOBAL_PARAMETERS_CONTAINER) {
+			fwu->img.bl_config.data = payload;
+			fwu->img.bl_config.size = content_len;
+		} else if (id == BL_LOCKDOWN_INFO_CONTAINER ||
+				id == DEVICE_CONFIG_CONTAINER) {
+			fwu->img.lockdown.data = payload;
+			fwu->img.lockdown.size = content_len;
+		}
+	}
+}
+
+/*
+ * Parse a header-version-0x10 firmware image: read the checksum, locate the
+ * top level container, then walk its list of 4-byte little endian container
+ * addresses, recording the data pointer and size of each recognized
+ * container in fwu->img. Bootloader and utility containers are expanded
+ * further by their dedicated sub-parsers.
+ */
+static void fwu_parse_image_header_10(void)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *image;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+	struct image_header_10 *header;
+
+	image = fwu->image;
+	header = (struct image_header_10 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	/* address of top level container */
+	offset = le_to_uint(header->top_level_container_start_addr);
+	descriptor = (struct container_descriptor *)(image + offset);
+
+	/* address of top level container content */
+	offset = le_to_uint(descriptor->content_address);
+	num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		/* each list entry is the address of one container descriptor */
+		addr = le_to_uint(image + offset);
+		offset += 4;
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			fwu->img.ui_firmware.data = content;
+			fwu->img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			fwu->img.ui_config.data = content;
+			fwu->img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			/* first content byte is the bootloader version */
+			fwu->img.bl_version = *content;
+			fwu->img.bootloader.data = content;
+			fwu->img.bootloader.size = length;
+			fwu_parse_image_header_10_bootloader(image);
+			break;
+		case UTILITY_CONTAINER:
+			fwu->img.utility.data = content;
+			fwu->img.utility.size = length;
+			fwu_parse_image_header_10_utility(image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			fwu->img.contains_guest_code = true;
+			fwu->img.guest_code.data = content;
+			fwu->img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			fwu->img.contains_disp_config = true;
+			fwu->img.dp_config.data = content;
+			fwu->img.dp_config.size = length;
+			break;
+		case PERMANENT_CONFIG_CONTAINER:
+		case GUEST_SERIALIZATION_CONTAINER:
+			fwu->img.contains_perm_config = true;
+			fwu->img.pm_config.data = content;
+			fwu->img.pm_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			fwu->img.contains_flash_config = true;
+			fwu->img.fl_config.data = content;
+			fwu->img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			/* firmware ID lives 4 bytes into the container */
+			fwu->img.contains_firmware_id = true;
+			fwu->img.firmware_id = le_to_uint(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Parse a header-version-0x05/0x06 firmware image. These legacy layouts
+ * place the firmware at a fixed IMAGE_AREA_OFFSET (shifted by an optional
+ * embedded bootloader), with the UI config immediately following and the
+ * lockdown block just before the image area.
+ */
+static void fwu_parse_image_header_05_06(void)
+{
+	int retval;
+	const unsigned char *image;
+	struct image_header_05_06 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	image = fwu->image;
+	header = (struct image_header_05_06 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	fwu->img.bl_version = header->header_version;
+
+	fwu->img.contains_bootloader = header->options_bootloader;
+	if (fwu->img.contains_bootloader)
+		fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+	fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+	if (fwu->img.ui_firmware.size) {
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+		/* an embedded bootloader precedes the UI firmware */
+		if (fwu->img.contains_bootloader)
+			fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+	}
+
+	/* TDDI v6 images keep the firmware at the fixed offset regardless */
+	if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+	fwu->img.ui_config.size = le_to_uint(header->config_size);
+	if (fwu->img.ui_config.size) {
+		/* UI config directly follows the UI firmware */
+		fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+				fwu->img.ui_firmware.size;
+	}
+
+	if (fwu->img.contains_bootloader || header->options_tddi)
+		fwu->img.contains_disp_config = true;
+	else
+		fwu->img.contains_disp_config = false;
+
+	if (fwu->img.contains_disp_config) {
+		fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+		fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+		fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+	} else {
+		/* header field is reused as the customer product ID here */
+		retval = secure_memcpy(fwu->img.cstmr_product_id,
+				sizeof(fwu->img.cstmr_product_id),
+				header->cstmr_product_id,
+				sizeof(header->cstmr_product_id),
+				PRODUCT_ID_SIZE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy custom product ID string\n",
+					__func__);
+		}
+		fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+	}
+
+	fwu->img.contains_firmware_id = header->options_firmware_id;
+	if (fwu->img.contains_firmware_id)
+		fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+	retval = secure_memcpy(fwu->img.product_id,
+			sizeof(fwu->img.product_id),
+			header->product_id,
+			sizeof(header->product_id),
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+	fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+	/* lockdown block sits immediately before the image area */
+	fwu->img.lockdown.size = LOCKDOWN_SIZE;
+	fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+
+	return;
+}
+
+/*
+ * Top-level image parser: dispatch on the image header's major version,
+ * then, for v7/v8 bootloaders, extract and validate the image's flash
+ * config partition table against the device's.
+ *
+ * Returns 0 on success or -EINVAL for an unsupported/incomplete image.
+ */
+static int fwu_parse_image_info(void)
+{
+	struct image_header_10 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	header = (struct image_header_10 *)fwu->image;
+
+	/* start from a clean slate; parsers only set what they find */
+	memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		fwu_parse_image_header_10();
+		break;
+	case IMAGE_HEADER_VERSION_05:
+	case IMAGE_HEADER_VERSION_06:
+		fwu_parse_image_header_05_06();
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Unsupported image file format (0x%02x)\n",
+				__func__, header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		/* v7/v8 images must carry a flash config to reflash safely */
+		if (!fwu->img.contains_flash_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No flash config found in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		fwu_parse_partition_table(fwu->img.fl_config.data,
+				&fwu->img.blkcount, &fwu->img.phyaddr);
+
+		if (fwu->img.blkcount.utility_param)
+			fwu->img.contains_utility_param = true;
+
+		fwu_compare_partition_tables();
+	} else {
+		fwu->new_partition_table = false;
+		fwu->incompatible_partition_tables = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F$34 flash status and command registers and cache the decoded
+ * values in fwu->in_bl_mode, fwu->flash_status and fwu->command. The bit
+ * layout of both registers depends on the bootloader generation.
+ *
+ * Returns 0 on success or a negative errno from the register read.
+ */
+static int fwu_read_flash_status(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* bit 7 of the status register flags bootloader mode */
+	fwu->in_bl_mode = status >> 7;
+
+	if (fwu->bl_version == BL_V5)
+		fwu->flash_status = (status >> 4) & MASK_3BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->flash_status = status & MASK_3BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->flash_status = status & MASK_5BIT;
+
+	/* bootloader writes manage status themselves; force idle */
+	if (fwu->write_bootloader)
+		fwu->flash_status = 0x00;
+
+	if (fwu->flash_status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash status = %d, command = 0x%02x\n",
+				__func__, fwu->flash_status, fwu->command);
+	}
+
+	/*
+	 * NOTE(review): status 0x08 on v7/v8 is treated as benign and
+	 * cleared here - confirm meaning against the F$34 v7 register map.
+	 */
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (fwu->flash_status == 0x08)
+			fwu->flash_status = 0x00;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5)
+		fwu->command = command & MASK_4BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->command = command & MASK_6BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->command = command;
+
+	if (fwu->write_bootloader)
+		fwu->command = 0x00;
+
+	return 0;
+}
+
+/*
+ * Poll until the flash controller reports CMD_IDLE with a clean status,
+ * or until timeout_ms elapses. When @poll is false the status is only
+ * re-read on the final attempt (interrupt-driven callers update it
+ * asynchronously).
+ *
+ * Returns 0 once idle, -ETIMEDOUT otherwise.
+ */
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+	int attempt;
+	int max_attempts = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (attempt = 1; attempt <= max_attempts; attempt++) {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		if (poll || (attempt == max_attempts))
+			fwu_read_flash_status();
+
+		if ((fwu->command == CMD_IDLE) &&
+				(fwu->flash_status == 0x00))
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for idle status\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Issue a v7 erase/enter-bootloader command as one register transaction:
+ * partition ID, command and bootloader-ID payload are packed into the
+ * data_1_5 frame and written in a single burst starting at the partition
+ * ID offset. The frame is pre-zeroed, so a cmd not listed in the switch
+ * results in a zeroed (no-op) partition/command being sent.
+ *
+ * Returns 0 on success or a negative errno from the register write.
+ */
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	struct f34_v7_data_1_5 data_1_5;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UTILITY_PARAMETER:
+		data_1_5.partition_id = UTILITY_PARAMETER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	};
+
+	/* erase/enter-BL commands must be authenticated by bootloader ID */
+	data_1_5.payload_0 = fwu->bootloader_id[0];
+	data_1_5.payload_1 = fwu->bootloader_id[1];
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			data_1_5.data,
+			sizeof(data_1_5.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write single transaction command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a driver-level command into its v7 opcode and issue it.
+ * Erase and enter-bootloader commands require the bootloader-ID payload
+ * and are delegated to the single-transaction path; the rest are written
+ * directly to the flash command register.
+ *
+ * Returns 0 on success, -EINVAL for an unknown cmd, or a negative errno
+ * from the register write.
+ */
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+	case CMD_WRITE_CONFIG:
+	case CMD_WRITE_LOCKDOWN:
+	case CMD_WRITE_GUEST_CODE:
+	case CMD_WRITE_BOOTLOADER:
+	case CMD_WRITE_UTILITY_PARAM:
+		command = CMD_V7_WRITE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+		command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	};
+
+	/* record before issuing so the status poller sees what was sent */
+	fwu->command = command;
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+	case CMD_ENABLE_FLASH_PROG:
+		/* these need the bootloader-ID payload in the same burst */
+		retval = fwu_write_f34_v7_command_single_transaction(cmd);
+		if (retval < 0)
+			return retval;
+		else
+			return 0;
+	default:
+		break;
+	};
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write flash command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a driver-level command into its v5/v6 opcode and issue it.
+ * Erase and enter-bootloader commands first stage the 2-byte bootloader
+ * ID in the payload area as authentication. TDDI-specific commands are
+ * compiled in only under SYNA_TDDI.
+ *
+ * Returns 0 on success, -EINVAL for an unknown cmd, or a negative errno
+ * from a register write.
+ */
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_IDLE:
+		command = CMD_V5V6_IDLE;
+		break;
+	case CMD_WRITE_FW:
+		command = CMD_V5V6_WRITE_FW;
+		break;
+	case CMD_WRITE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		command = CMD_V5V6_WRITE_LOCKDOWN;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V5V6_WRITE_GUEST_CODE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V5V6_READ_CONFIG;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V5V6_ERASE_ALL;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		command = CMD_V5V6_ERASE_UI_CONFIG;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		command = CMD_V5V6_ERASE_DISP_CONFIG;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V5V6_ERASE_GUEST_CODE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V5V6_ENABLE_FLASH_PROG;
+		break;
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+		command = CMD_V5V6_ERASE_CHIP;
+		break;
+	case CMD_ERASE_FORCE_CONFIG:
+		command = CMD_V5V6_ERASE_FORCE_CONFIG;
+		break;
+	case CMD_READ_FORCE_CONFIG:
+		command = CMD_V5V6_READ_FORCE_CONFIG;
+		break;
+	case CMD_WRITE_FORCE_CONFIG:
+		/* force config shares the generic config-write opcode */
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_ERASE_LOCKDOWN_DATA:
+		command = CMD_V5V6_ERASE_LOCKDOWN_DATA;
+		break;
+	case CMD_READ_LOCKDOWN_DATA:
+		command = CMD_V5V6_READ_LOCKDOWN_DATA;
+		break;
+	case CMD_WRITE_LOCKDOWN_DATA:
+		command = CMD_V5V6_WRITE_LOCKDOWN_DATA;
+		break;
+	case CMD_ERASE_LCM_DATA:
+		command = CMD_V5V6_ERASE_LCM_DATA;
+		break;
+	case CMD_ERASE_OEM_DATA:
+		command = CMD_V5V6_ERASE_OEM_DATA;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+	case CMD_ERASE_FORCE_CONFIG:
+	case CMD_ERASE_LOCKDOWN_DATA:
+	case CMD_ERASE_LCM_DATA:
+	case CMD_ERASE_OEM_DATA:
+#endif
+	case CMD_ENABLE_FLASH_PROG:
+		/* stage bootloader ID as authentication for destructive ops */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				fwu->bootloader_id,
+				sizeof(fwu->bootloader_id));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write bootloader ID\n",
+					__func__);
+			return retval;
+		}
+		break;
+	default:
+		break;
+	};
+
+	/* record before issuing so the status poller sees what was sent */
+	fwu->command = command;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command 0x%02x\n",
+				__func__, command);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Dispatch a flash command to the handler for the bootloader generation. */
+static int fwu_write_f34_command(unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_command(cmd);
+
+	return fwu_write_f34_v5v6_command(cmd);
+}
+
+/*
+ * Select the v7 partition targeted by @cmd and write its ID to the
+ * partition ID register. For config reads/writes the partition follows
+ * the currently selected fwu->config_area.
+ *
+ * Returns 0 on success, -EINVAL for an unknown cmd or config area, or a
+ * negative errno from the register write.
+ */
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char partition;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_WRITE_CONFIG:
+	case CMD_READ_CONFIG:
+		if (fwu->config_area == UI_CONFIG_AREA) {
+			partition = CORE_CONFIG_PARTITION;
+		} else if (fwu->config_area == DP_CONFIG_AREA) {
+			partition = DISPLAY_CONFIG_PARTITION;
+		} else if (fwu->config_area == PM_CONFIG_AREA) {
+			partition = GUEST_SERIALIZATION_PARTITION;
+		} else if (fwu->config_area == BL_CONFIG_AREA) {
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		} else if (fwu->config_area == FLASH_CONFIG_AREA) {
+			partition = FLASH_CONFIG_PARTITION;
+		} else if (fwu->config_area == UPP_AREA) {
+			partition = UTILITY_PARAMETER_PARTITION;
+		} else {
+			/*
+			 * Previously an unknown config area fell through
+			 * with an uninitialized partition ID, writing
+			 * garbage to the device. Reject it instead.
+			 */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Invalid config area 0x%02x\n",
+					__func__, fwu->config_area);
+			return -EINVAL;
+		}
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		partition = DEVICE_CONFIG_PARTITION;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_WRITE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_WRITE_UTILITY_PARAM:
+		partition = UTILITY_PARAMETER_PARTITION;
+		break;
+	case CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			&partition,
+			sizeof(partition));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write partition ID\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Write the partition ID when required; v5/v6 bootloaders have none. */
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_partition_id(cmd);
+
+	return 0;
+}
+
+/*
+ * Read the device's partition table out of the flash config partition:
+ * select the flash config area, point block number at 0, set the transfer
+ * length, issue a config read, wait for completion, then pull the table
+ * bytes from the payload registers into @partition_table (which must hold
+ * at least fwu->partition_table_bytes).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* transfer length is little endian in the register file */
+	length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+	length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.transfer_length,
+			length,
+			sizeof(length));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write transfer length\n",
+				__func__);
+		return retval;
+	}
+
+	retval = fwu_write_f34_command(CMD_READ_CONFIG);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(READ_CONFIG_WAIT_MS);
+
+	retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to wait for idle status\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + fwu->off.payload,
+			partition_table,
+			fwu->partition_table_bytes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F$34 v7 query registers: bootloader ID/version, block size,
+ * flash config length and payload length; set the fixed v7 data register
+ * offsets; count the supported partitions from the query bitmap; then
+ * fetch and parse the device partition table to populate block counts,
+ * physical addresses and capability flags.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v7_queries(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char query_base;
+	unsigned char index;
+	unsigned char offset;
+	unsigned char *ptable;
+	struct f34_v7_query_0 query_0;
+	struct f34_v7_query_1_7 query_1_7;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	query_base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query 0\n",
+				__func__);
+		return retval;
+	}
+
+	/* queries 1-7 follow query 0's variable-size subpacket */
+	offset = query_0.subpacket_1_size + 1;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base + offset,
+			query_1_7.data,
+			sizeof(query_1_7.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read queries 1 to 7\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+	fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	/* a v8 bootloader is only distinguishable by its major revision */
+	if (fwu->bootloader_id[1] == BL_V8)
+		fwu->bl_version = BL_V8;
+
+	fwu->block_size = query_1_7.block_size_15_8 << 8 |
+			query_1_7.block_size_7_0;
+
+	fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+			query_1_7.flash_config_length_7_0;
+
+	fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+			query_1_7.payload_length_7_0;
+
+	/* v7 data register layout is fixed */
+	fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+	fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+	fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+	fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+	/* partition-support bitmap sits at the end of queries 1-7 */
+	index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+	fwu->partitions = 0;
+	for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+		for (ii = 0; ii < 8; ii++) {
+			if (query_1_7.data[index + offset] & (1 << ii))
+				fwu->partitions++;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Supported partitions: 0x%02x\n",
+				__func__, query_1_7.data[index + offset]);
+	}
+
+	/* 8 bytes per partition entry plus a 2-byte table header */
+	fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+	ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+	if (!ptable) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for partition table\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = fwu_read_f34_v7_partition_table(ptable);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read partition table\n",
+				__func__);
+		kfree(ptable);
+		return retval;
+	}
+
+	fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+	/* derive capability flags from the non-zero block counts */
+	if (fwu->blkcount.dp_config)
+		fwu->flash_properties.has_disp_config = 1;
+	else
+		fwu->flash_properties.has_disp_config = 0;
+
+	if (fwu->blkcount.pm_config)
+		fwu->flash_properties.has_pm_config = 1;
+	else
+		fwu->flash_properties.has_pm_config = 0;
+
+	if (fwu->blkcount.bl_config)
+		fwu->flash_properties.has_bl_config = 1;
+	else
+		fwu->flash_properties.has_bl_config = 0;
+
+	if (fwu->blkcount.guest_code)
+		fwu->has_guest_code = 1;
+	else
+		fwu->has_guest_code = 0;
+
+	if (fwu->blkcount.utility_param)
+		fwu->has_utility_param = 1;
+	else
+		fwu->has_utility_param = 0;
+
+	kfree(ptable);
+
+	return 0;
+}
+
+/*
+ * Read the F$34 v5/v6 query registers: bootloader ID, version-specific
+ * register offsets, block size, flash properties and the per-area block
+ * counts. When flash properties advertise query 4, the extended
+ * properties are read and the optional guest-code (and, under SYNA_TDDI,
+ * force/lockdown/LCM/OEM) block counts are fetched.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char offset;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	/* register offsets differ between the v5 and v6 layouts */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->block_size, &(buf[0]));
+
+	/* on v5 the command register follows the payload block */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/* 2 bytes each for the fw and UI config counts, always present */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	count = 4;
+
+	/* optional counts are packed in this fixed order */
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+#ifdef SYNA_TDDI
+	fwu->has_force_config = false;
+	fwu->has_lockdown_data = false;
+	fwu->has_lcm_data = false;
+	fwu->has_oem_data = false;
+#endif
+
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+		/*
+		 * NOTE(review): each optional block count below is read as
+		 * 2 bytes but the register offset advances by only 1 per
+		 * field (count++) - confirm against the v6 register map.
+		 */
+		offset = fwu->off.properties_2 + 1;
+		count = 0;
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			count++;
+			fwu->has_guest_code = true;
+		}
+#ifdef SYNA_TDDI
+		if (properties_2.has_force_config) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read tddi force block count\n",
+						__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
+			count++;
+			fwu->has_force_config = true;
+		}
+		if (properties_2.has_lockdown_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read tddi lockdown block count\n",
+						__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
+			count++;
+			fwu->has_lockdown_data = true;
+		}
+		if (properties_2.has_lcm_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read tddi lcm block count\n",
+						__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
+			count++;
+			fwu->has_lcm_data = true;
+		}
+		if (properties_2.has_oem_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read tddi oem block count\n",
+						__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
+			fwu->has_oem_data = true;
+		}
+#endif
+	}
+
+	/* utility parameters only exist on v7/v8 bootloaders */
+	fwu->has_utility_param = false;
+
+	return 0;
+}
+
+/*
+ * Reset cached block counts/addresses, then query the F$34 registers via
+ * the generation-specific routine. Note BL_V8 is only discovered inside
+ * the v7 query path, so the dispatch checks BL_V7 alone.
+ */
+static int fwu_read_f34_queries(void)
+{
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		return fwu_read_f34_v7_queries();
+
+	return fwu_read_f34_v5v6_queries();
+}
+
+/*
+ * Write @block_cnt flash blocks from @block_ptr using the v7 protocol:
+ * select the partition for @command, reset the block number, then loop
+ * issuing transfers of at most fwu->payload_length blocks, streaming each
+ * transfer's bytes into the payload registers in chunks bounded by
+ * MAX_WRITE_SIZE (when defined) and waiting for idle between transfers.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short left_bytes;
+	unsigned short write_size;
+	unsigned short max_write_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* cap each transfer at the controller's payload length */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* transfer length is little endian in the register file */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+#ifdef MAX_WRITE_SIZE
+		/* clamp bus writes to MAX_WRITE_SIZE, block-size aligned */
+		max_write_size = MAX_WRITE_SIZE;
+		if (max_write_size >= transfer * fwu->block_size)
+			max_write_size = transfer * fwu->block_size;
+		else if (max_write_size > fwu->block_size)
+			max_write_size -= max_write_size % fwu->block_size;
+		else
+			max_write_size = fwu->block_size;
+#else
+		max_write_size = transfer * fwu->block_size;
+#endif
+		left_bytes = transfer * fwu->block_size;
+
+		do {
+			if (left_bytes / max_write_size)
+				write_size = max_write_size;
+			else
+				write_size = left_bytes;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					data_base + fwu->off.payload,
+					block_ptr,
+					write_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to write block data (remaining = %d)\n",
+						__func__, remaining);
+				return retval;
+			}
+
+			block_ptr += write_size;
+			left_bytes -= write_size;
+		} while (left_bytes);
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Write block_cnt flash blocks from block_ptr using the bootloader
+ * v5/v6 (F34 v0/v1) protocol: the block number register is programmed
+ * once up front, then one payload write + one command + one idle wait
+ * is issued per block.  Presumably the controller auto-increments the
+ * block number after each command — TODO confirm against the F34 spec.
+ * Returns 0 on success or a negative error code from the bus/firmware.
+ */
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Upper bits of the block number select the target config area */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		/* One full block per payload write in v5/v6 */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		/* Each block must complete before the next is queued */
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a flash block write to the protocol handler matching the
+ * detected bootloader generation (v7/v8 vs. v5/v6).
+ */
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	bool use_v7_protocol;
+
+	use_v7_protocol = (fwu->bl_version == BL_V7) ||
+			(fwu->bl_version == BL_V8);
+
+	if (use_v7_protocol)
+		return fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+
+	return fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+}
+
+/*
+ * Read block_cnt flash blocks into fwu->read_config_buf using the
+ * bootloader v7/v8 protocol.  Blocks are transferred in chunks of up
+ * to fwu->payload_length blocks per command.  The caller must have
+ * sized read_config_buf for block_cnt * block_size bytes (see
+ * fwu_allocate_read_config_buf callers).  Returns 0 on success or a
+ * negative error code.
+ */
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Select the partition to read from before programming registers */
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* Transfer a full payload while at least one fits,
+		 * otherwise the final partial remainder.
+		 */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* Transfer length register is little-endian, 16 bit */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Read block_cnt flash blocks into fwu->read_config_buf using the
+ * bootloader v5/v6 protocol: one command + idle wait + payload read
+ * per block.  The block number is written once; presumably the
+ * controller advances it after each read command — TODO confirm.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* Upper bits of the block number select the source config area */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a flash block read to the protocol handler matching the
+ * detected bootloader generation (v7/v8 vs. v5/v6).
+ */
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	bool use_v7_protocol;
+
+	use_v7_protocol = (fwu->bl_version == BL_V7) ||
+			(fwu->bl_version == BL_V8);
+
+	if (use_v7_protocol)
+		return fwu_read_f34_v7_blocks(block_cnt, cmd);
+
+	return fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+}
+
+/*
+ * Obtain the firmware ID of the image to be flashed.
+ *
+ * If the image header carries a firmware ID, use it directly.
+ * Otherwise parse the decimal digits following "PR" in the image file
+ * name (PRxxxxxxx convention).  Returns 0 on success, -EINVAL if no
+ * parsable PR number is found, -ENOMEM on allocation failure.
+ *
+ * Fix: the parsed value is now stored in a local unsigned long before
+ * being narrowed to *fw_id.  The previous code passed
+ * (unsigned long *)fw_id to sstrtoul(), which writes
+ * sizeof(unsigned long) bytes through an unsigned int pointer —
+ * out-of-bounds store (undefined behavior) on 64-bit kernels.
+ */
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	unsigned long id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+	} else {
+		strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+		if (!strptr) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+					__func__, fwu->image_name);
+			return -EINVAL;
+		}
+
+		strptr += 2;
+		/* kzalloc guarantees the digit string is NUL terminated */
+		firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+		if (!firmware_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for firmware_id\n",
+					__func__);
+			return -ENOMEM;
+		}
+		/* Copy consecutive decimal digits after "PR" */
+		while (strptr[index] >= '0' && strptr[index] <= '9') {
+			firmware_id[index] = strptr[index];
+			index++;
+			if (index == MAX_FIRMWARE_ID_LEN - 1)
+				break;
+		}
+
+		retval = sstrtoul(firmware_id, 10, &id);
+		kfree(firmware_id);
+		if (retval) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to obtain image firmware ID\n",
+					__func__);
+			return -EINVAL;
+		}
+		*fw_id = (unsigned int)id;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the device config ID from the F34 control base into
+ * fwu->config_id.  The ID length depends on the bootloader generation.
+ */
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char id_size = V5V6_CONFIG_ID_SIZE;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		id_size = V7_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.ctrl_base_addr,
+			fwu->config_id,
+			id_size);
+
+	return (retval < 0) ? retval : 0;
+}
+
+/*
+ * Decide whether reflash is needed and which area to update.
+ *
+ * Decision order:
+ *   1. force_update or device stuck in bootloader mode -> full update.
+ *   2. Compare image vs. device firmware ID: newer image -> full
+ *      update; older image -> no update.
+ *   3. Equal firmware IDs: compare config IDs byte by byte; a higher
+ *      image config -> config-only update, lower -> no update.
+ * Returns UI_FIRMWARE, UI_CONFIG, or NONE.
+ */
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->bl_mode_device) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	/* Lexicographic, most-significant-byte-first comparison */
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
+/*
+ * Scan the Page Description Table for the functions needed by the
+ * firmware updater (F01 device control, F34 flash, F35 recovery).
+ *
+ * Records base addresses, derives the bootloader version from the F34
+ * function version, and rebuilds the F34 interrupt mask.  If F01/F34
+ * are absent but F35 is present, the device is in microbootloader
+ * mode (fwu->in_ub_mode) and recovery status is checked instead.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	/* PDT entries are walked downward from PDT_START */
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				/* F34 function version maps to bootloader
+				 * generation (v2 covers both BL_V7/BL_V8;
+				 * BL_V8 is presumably refined later —
+				 * TODO confirm)
+				 */
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				/* Build the interrupt mask bits owned by F34 */
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				fwu->f35_fd.cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			}
+		} else {
+			/* Empty entry marks the end of the PDT */
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	/* F01 interrupt enable register sits one past the control base */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Put the controller into flash programming (bootloader) mode.
+ *
+ * No-op if already in BL mode.  Otherwise: mask interrupts, issue
+ * CMD_ENABLE_FLASH_PROG, wait for idle, rescan the PDT (addresses
+ * change in BL mode), re-read F34 queries, and force nosleep/normal
+ * sleep mode in F01 device control.  Returns 0 on success or a
+ * negative error code.
+ */
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	/* in_bl_mode is presumably refreshed by the idle wait path —
+	 * TODO confirm fwu_wait_for_idle updates flash status
+	 */
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Register map differs in BL mode; rediscover functions */
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Keep the controller awake for the duration of flashing */
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+/*
+ * Verify that the UI firmware partition in the image matches the
+ * block count reported by the device.
+ */
+static int fwu_check_ui_firmware_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.ui_firmware.size / fwu->block_size;
+	if (expected == fwu->blkcount.ui_firmware)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI firmware size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the UI configuration partition in the image matches
+ * the block count reported by the device.
+ */
+static int fwu_check_ui_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.ui_config.size / fwu->block_size;
+	if (expected == fwu->blkcount.ui_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the display configuration partition in the image
+ * matches the block count reported by the device.
+ */
+static int fwu_check_dp_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.dp_config.size / fwu->block_size;
+	if (expected == fwu->blkcount.dp_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Display configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the permanent configuration partition in the image
+ * matches the block count reported by the device.
+ */
+static int fwu_check_pm_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.pm_config.size / fwu->block_size;
+	if (expected == fwu->blkcount.pm_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Permanent configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the bootloader configuration partition in the image
+ * matches the block count reported by the device.
+ */
+static int fwu_check_bl_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.bl_config.size / fwu->block_size;
+	if (expected == fwu->blkcount.bl_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Bootloader configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the guest code partition in the image matches the
+ * block count reported by the device.
+ */
+static int fwu_check_guest_code_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short expected;
+
+	expected = fwu->img.guest_code.size / fwu->block_size;
+	if (expected == fwu->blkcount.guest_code)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Guest code size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Erase the flash partition selected by fwu->config_area and wait for
+ * the controller to return to idle.  Returns 0 on success, -EINVAL
+ * for an unrecognized config area, or a negative error code.
+ *
+ * Fix: the UPP_AREA case was missing its break and fell through into
+ * the default case, so a successful utility-parameter erase still
+ * logged "Invalid config area" and returned -EINVAL.
+ */
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case FLASH_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case UPP_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UTILITY_PARAMETER);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+/*
+ * Issue the bootloader erase command and wait for the controller to
+ * report idle again.
+ */
+static int fwu_erase_bootloader(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	err = fwu_write_f34_command(CMD_ERASE_BOOTLOADER);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	err = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Issue the TDDI lockdown-data erase command, allow the controller a
+ * fixed settle time, then wait for idle.
+ */
+static int fwu_erase_lockdown_data(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	err = fwu_write_f34_command(CMD_ERASE_LOCKDOWN_DATA);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	msleep(100);
+
+	err = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#endif
+
+/*
+ * Issue the guest code erase command and wait for the controller to
+ * report idle again.
+ */
+static int fwu_erase_guest_code(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	err = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	err = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (err < 0)
+		return err;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+/*
+ * Erase all flashable areas prior to programming.
+ *
+ * BL_V7 has no "erase all": UI firmware and UI config are erased
+ * separately.  Other generations use CMD_ERASE_ALL; for BL_V8 an idle
+ * error is tolerated when the flash status reports a bad partition
+ * table (the table is rewritten later), and BL_V8 returns early since
+ * erase-all already covers the remaining areas.  Display config and
+ * guest code are erased afterwards where supported.
+ */
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		/* BL_V8 with a corrupt partition table is expected to
+		 * fail here; ignore the error in that one case
+		 */
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the UI firmware partition from the parsed image.
+ */
+static int fwu_write_firmware(void)
+{
+	unsigned short blocks;
+
+	blocks = fwu->img.ui_firmware.size / fwu->block_size;
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.ui_firmware.data,
+			blocks, CMD_WRITE_FW);
+}
+
+/*
+ * Program the bootloader image partition.  write_bootloader is set
+ * for the duration so the write path can special-case it.
+ */
+static int fwu_write_bootloader(void)
+{
+	unsigned short blocks;
+	int err;
+
+	blocks = fwu->img.bl_image.size / fwu->block_size;
+
+	fwu->write_bootloader = true;
+	err = fwu_write_f34_blocks((unsigned char *)fwu->img.bl_image.data,
+			blocks, CMD_WRITE_BOOTLOADER);
+	fwu->write_bootloader = false;
+
+	return err;
+}
+
+/*
+ * Assemble the utility parameter partition in read_config_buf and
+ * program it.
+ *
+ * Parameters from the image (and, when F51_DISCRETE_FORCE is set, the
+ * preserved force calibration data) are packed back to back; the last
+ * 4 bytes hold a little-endian checksum over the preceding payload.
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_utility_parameter(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char checksum_array[4];
+	unsigned char *pbuf;
+	unsigned short remaining_size;
+	unsigned short utility_param_size;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	utility_param_size = fwu->blkcount.utility_param * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(utility_param_size);
+	if (retval < 0)
+		return retval;
+	memset(fwu->read_config_buf, 0x00, utility_param_size);
+
+	pbuf = fwu->read_config_buf;
+	/* Reserve the trailing 4 checksum bytes */
+	remaining_size = utility_param_size - 4;
+
+	for (ii = 0; ii < MAX_UTILITY_PARAMS; ii++) {
+		if (fwu->img.utility_param_id[ii] == UNUSED)
+			continue;
+
+#ifdef F51_DISCRETE_FORCE
+		/* The force parameter is rebuilt from the calibration
+		 * data saved off the device rather than taken from the
+		 * image, unless the device is stuck in BL mode
+		 */
+		if (fwu->img.utility_param_id[ii] == FORCE_PARAMETER) {
+			if (fwu->bl_mode_device) {
+				dev_info(rmi4_data->pdev->dev.parent,
+						"%s: Device in bootloader mode, skipping calibration data restoration\n",
+						__func__);
+				goto image_param;
+			}
+			retval = secure_memcpy(&(pbuf[4]),
+					remaining_size - 4,
+					fwu->cal_data,
+					fwu->cal_data_buf_size,
+					fwu->cal_data_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy force calibration data\n",
+						__func__);
+				return retval;
+			}
+			/* 4-byte parameter header: id, 0, length in
+			 * 16-bit words (pbuf[3] stays zero from memset)
+			 */
+			pbuf[0] = FORCE_PARAMETER;
+			pbuf[1] = 0x00;
+			pbuf[2] = (4 + fwu->cal_data_size) / 2;
+			pbuf += (fwu->cal_data_size + 4);
+			remaining_size -= (fwu->cal_data_size + 4);
+			continue;
+		}
+image_param:
+#endif
+
+		retval = secure_memcpy(pbuf,
+				remaining_size,
+				fwu->img.utility_param[ii].data,
+				fwu->img.utility_param[ii].size,
+				fwu->img.utility_param[ii].size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy utility parameter data\n",
+					__func__);
+			return retval;
+		}
+		pbuf += fwu->img.utility_param[ii].size;
+		remaining_size -= fwu->img.utility_param[ii].size;
+	}
+
+	/* Checksum covers everything except its own 4 trailing bytes */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((utility_param_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[utility_param_size - 4] = checksum_array[0];
+	fwu->read_config_buf[utility_param_size - 3] = checksum_array[1];
+	fwu->read_config_buf[utility_param_size - 2] = checksum_array[2];
+	fwu->read_config_buf[utility_param_size - 1] = checksum_array[3];
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->read_config_buf,
+			fwu->blkcount.utility_param, CMD_WRITE_UTILITY_PARAM);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Program the configuration area currently staged in fwu->config_data
+ * / config_block_count (set up by the fwu_write_*_configuration
+ * helpers).
+ */
+static int fwu_write_configuration(void)
+{
+	unsigned char *data = (unsigned char *)fwu->config_data;
+
+	return fwu_write_f34_blocks(data, fwu->config_block_count,
+			CMD_WRITE_CONFIG);
+}
+
+/*
+ * Stage the UI configuration from the image and program it.
+ */
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	fwu->config_area = UI_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+/*
+ * Stage the display configuration from the image and program it.
+ */
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	fwu->config_area = DP_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+/*
+ * Stage the permanent configuration from the image and program it.
+ */
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	fwu->config_area = PM_CONFIG_AREA;
+
+	return fwu_write_configuration();
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Program the TDDI lockdown data staged in read_config_buf, then
+ * reset the device so the new data takes effect.
+ */
+static int fwu_write_tddi_lockdown_data(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	err = fwu_write_f34_blocks(fwu->read_config_buf,
+			fwu->blkcount.tddi_lockdown_data,
+			CMD_WRITE_LOCKDOWN_DATA);
+	if (err < 0)
+		return err;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+#endif
+
+/*
+ * Stage the flash configuration from the image, validate its size
+ * against the device, erase and reprogram the area, then reset the
+ * device so the new layout is picked up.
+ */
+static int fwu_write_flash_configuration(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	err = fwu_erase_configuration();
+	if (err < 0)
+		return err;
+
+	err = fwu_write_configuration();
+	if (err < 0)
+		return err;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Program the guest code partition from the parsed image.
+ */
+static int fwu_write_guest_code(void)
+{
+	unsigned short blocks;
+
+	blocks = fwu->img.guest_code.size / fwu->block_size;
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			blocks, CMD_WRITE_GUEST_CODE);
+}
+
+/*
+ * Program the lockdown partition from the parsed image.
+ */
+static int fwu_write_lockdown(void)
+{
+	unsigned short blocks;
+
+	blocks = fwu->img.lockdown.size / fwu->block_size;
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.lockdown.data,
+			blocks, CMD_WRITE_LOCKDOWN);
+}
+
+/*
+ * Program the BL_V8 partition table: stage the flash configuration
+ * from the image, validate its size, write it, then reset the device
+ * so the new table takes effect.
+ */
+static int fwu_write_partition_table_v8(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	int err;
+
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	err = fwu_write_configuration();
+	if (err < 0)
+		return err;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Program a new BL_V7 partition table.
+ *
+ * The current bootloader configuration is first read into
+ * read_config_buf and erased, the flash configuration is rewritten
+ * (which resets the device), and the saved bootloader configuration
+ * is then restored.  Note fwu_write_flash_configuration() changes
+ * fwu->config_area/config_data, so both are re-staged afterwards.
+ */
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	/* Preserve the existing bootloader config before touching flash */
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Restore the saved bootloader config under the new table */
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Rewrite the whole bootloader area (BL_V7/BL_V8 devices): erase the
+ * utility parameter (if present), bootloader config, flash config and
+ * bootloader partitions; program the new bootloader; then restore the
+ * flash config, bootloader config and (if the image carries one) the
+ * utility parameter, resetting the device between stages.
+ * Returns 0 on success or a negative error code.
+ *
+ * Cleanup: removed the local has_utility_param, which was assigned
+ * but never read (fwu->has_utility_param is used directly).
+ */
+static int fwu_write_bl_area_v7(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->has_utility_param) {
+		fwu->config_area = UPP_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->config_area = BL_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_bootloader();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_bootloader();
+	if (retval < 0)
+		return retval;
+
+	/* Let the new bootloader come up before reprogramming configs */
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->img.bl_config.data;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->img.contains_utility_param) {
+		retval = fwu_write_utility_parameter();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Perform the full reflash sequence.
+ *
+ * Validates partition sizes (when the partition table is unchanged),
+ * decides whether a bootloader-area update is needed (utility
+ * parameter presence differs between device and image), erases
+ * everything, programs the bootloader area or partition table as
+ * required, then programs display config, UI config, guest code and
+ * finally the UI firmware.  Returns 0 on success or a negative error
+ * code.
+ */
+static int fwu_do_reflash(void)
+{
+	int retval;
+	bool do_bl_update = false;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!fwu->new_partition_table) {
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	/* A mismatch in utility parameter presence (either direction)
+	 * forces a bootloader area rewrite on v7/v8
+	 */
+	if (!fwu->has_utility_param && fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (fwu->has_utility_param && !fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (!do_bl_update && fwu->incompatible_partition_tables) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Incompatible partition tables\n",
+				__func__);
+		return -EINVAL;
+	} else if (!do_bl_update && fwu->new_partition_table) {
+		if (!fwu->force_update) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Partition table mismatch\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (do_bl_update) {
+		retval = fwu_write_bl_area_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Bootloader area programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	fwu->config_area = UI_CONFIG_AREA;
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	/* Firmware goes last so an interrupted flash leaves the device
+	 * recoverable via bootloader mode
+	 */
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	return retval;
+}
+
+/*
+ * Read the flash configuration area selected by fwu->config_area into
+ * fwu->read_config_buf (buffer is (re)allocated as needed by
+ * fwu_allocate_read_config_buf()).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_do_read_config(void)
+{
+    int retval;
+    unsigned short block_count;
+    unsigned short config_area;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    /* Validate the requested area and look up its flash block count */
+    switch (fwu->config_area) {
+    case UI_CONFIG_AREA:
+        block_count = fwu->blkcount.ui_config;
+        break;
+    case DP_CONFIG_AREA:
+        if (!fwu->flash_properties.has_disp_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Display configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.dp_config;
+        break;
+    case PM_CONFIG_AREA:
+        if (!fwu->flash_properties.has_pm_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Permanent configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.pm_config;
+        break;
+    case BL_CONFIG_AREA:
+        if (!fwu->flash_properties.has_bl_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Bootloader configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.bl_config;
+        break;
+    case UPP_AREA:
+        if (!fwu->has_utility_param) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Utility parameter not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.utility_param;
+        break;
+#ifdef SYNA_TDDI
+    case TDDI_FORCE_CONFIG_AREA:
+        if (!fwu->has_force_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: force configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.tddi_force_config;
+        break;
+    case TDDI_OEM_DATA_AREA:
+        if (!fwu->has_oem_data) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: oem data not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.tddi_oem_data;
+        break;
+    case TDDI_LCM_DATA_AREA:
+        if (!fwu->has_lcm_data) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: lcm data not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        block_count = fwu->blkcount.tddi_lcm_data;
+        break;
+#endif
+    default:
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Invalid config area\n",
+                __func__);
+        return -EINVAL;
+    }
+
+    if (block_count == 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Invalid block count\n",
+                __func__);
+        return -EINVAL;
+    }
+
+    mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+    /*
+     * On bootloader v5/v6 reading config requires flash programming
+     * mode.  Entering it clobbers fwu->config_area, so save and
+     * restore the selection around the call.
+     */
+    if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6) {
+        config_area = fwu->config_area;
+        retval = fwu_enter_flash_prog();
+        fwu->config_area = config_area;
+        if (retval < 0)
+            goto exit;
+    }
+
+    fwu->config_size = fwu->block_size * block_count;
+
+    retval = fwu_allocate_read_config_buf(fwu->config_size);
+    if (retval < 0)
+        goto exit;
+
+    retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+    /* v5/v6 parts were left in flash programming mode; reset to leave it */
+    if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6)
+        rmi4_data->reset_device(rmi4_data, false);
+
+    mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+    return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Read the TDDI lockdown data block into fwu->read_config_buf.
+ * Only supported on bootloader v6 parts that advertise lockdown data.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_do_read_tddi_lockdown_data(void)
+{
+    int retval = -EINVAL;
+    unsigned short block_count;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    block_count = fwu->blkcount.tddi_lockdown_data;
+    fwu->config_size = fwu->block_size * block_count;
+
+    if (fwu->bl_version != BL_V6) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not support lockdown data in bl v.%d\n",
+                __func__,
+                fwu->bl_version);
+        goto exit;
+    } else if (!fwu->has_lockdown_data) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not support lockdown data\n", __func__);
+        goto exit;
+    }
+
+    /* Always reallocate the shared read buffer at the exact size */
+    kfree(fwu->read_config_buf);
+
+    fwu->read_config_buf = kzalloc(fwu->config_size, GFP_KERNEL);
+
+    if (!fwu->read_config_buf) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to alloc mem for fwu->read_config_buf\n",
+                __func__);
+        fwu->read_config_buf_size = 0;
+        retval = -ENOMEM;
+        goto exit;
+    }
+    fwu->read_config_buf_size = fwu->config_size;
+    retval = fwu_read_f34_blocks(block_count, CMD_READ_LOCKDOWN_DATA);
+exit:
+    return retval;
+}
+
+/*
+ * Copy the first @leng bytes of the device's TDDI lockdown data into
+ * @lockdown_data.  Returns 0 on success or a negative errno.
+ */
+int get_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+    int rc = fwu_do_read_tddi_lockdown_data();
+
+    if (rc < 0)
+        return rc;
+
+    memcpy(lockdown_data, fwu->read_config_buf, leng);
+
+    return rc;
+}
+
+/*
+ * Program @leng bytes of TDDI lockdown data (bootloader v6 only).
+ * The block is staged in fwu->read_config_buf, its last four bytes are
+ * overwritten with a little-endian checksum of everything before them,
+ * and the result is flashed.  Returns 0 on success or a negative errno.
+ */
+int set_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+    int retval = -EINVAL;
+    unsigned long checksum;
+    unsigned char checksum_array[4];
+    unsigned short blk_cnt;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (fwu->bl_version != BL_V6) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not support lockdown data in bl v.%d\n",
+                __func__,
+                fwu->bl_version);
+        goto exit;
+    } else if (!fwu->has_lockdown_data) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not support lockdown data\n", __func__);
+        goto exit;
+    }
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        goto exit;
+
+    retval = fwu_erase_lockdown_data();
+    if (retval < 0)
+        goto exit;
+
+    blk_cnt = fwu->blkcount.tddi_lockdown_data;
+
+    fwu->config_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+    retval = fwu_allocate_read_config_buf(fwu->config_size);
+    if (retval < 0)
+        goto exit;
+    /* Stage caller data in a zeroed, full-size block image */
+    memset(fwu->read_config_buf, 0x00, fwu->config_size);
+    retval = secure_memcpy(fwu->read_config_buf, fwu->config_size,
+            lockdown_data, leng, leng);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to copy tddi lockdwon data\n",
+                __func__);
+        goto exit;
+    }
+
+    /* Checksum covers the block minus its 4-byte checksum trailer */
+    calculate_checksum((unsigned short *)fwu->read_config_buf,
+            ((fwu->config_size - 4) / 2),
+            &checksum);
+
+    convert_to_little_endian(checksum_array, checksum);
+
+    /* Trailer occupies the last four bytes of the final block */
+    fwu->read_config_buf[blk_cnt * fwu->block_size - 4] = checksum_array[0];
+    fwu->read_config_buf[blk_cnt * fwu->block_size - 3] = checksum_array[1];
+    fwu->read_config_buf[blk_cnt * fwu->block_size - 2] = checksum_array[2];
+    fwu->read_config_buf[blk_cnt * fwu->block_size - 1] = checksum_array[3];
+    retval = fwu_write_tddi_lockdown_data();
+exit:
+    return retval;
+}
+#endif
+
+/*
+ * Perform device lockdown on bootloader v7/v8 parts.
+ * Skips programming when the flash status register already reports a
+ * locked configuration (device_cfg_status == 2 — presumably the F34 v7
+ * "locked" state; confirm against the F34 spec).
+ *
+ * Returns 0 on success (or if already locked), negative errno otherwise.
+ */
+static int fwu_do_lockdown_v7(void)
+{
+    int retval;
+    struct f34_v7_data0 status;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        return retval;
+
+    retval = synaptics_rmi4_reg_read(rmi4_data,
+            fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+            status.data,
+            sizeof(status.data));
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to read flash status\n",
+                __func__);
+        return retval;
+    }
+
+    if (status.device_cfg_status == 2) {
+        dev_info(rmi4_data->pdev->dev.parent,
+                "%s: Device already locked down\n",
+                __func__);
+        return 0;
+    }
+
+    retval = fwu_write_lockdown();
+    if (retval < 0)
+        return retval;
+
+    pr_notice("%s: Lockdown programmed\n", __func__);
+
+    return retval;
+}
+
+/*
+ * Perform device lockdown on bootloader v5/v6 parts.
+ * TDDI parts with lockdown data use the dedicated set_tddi_lockdown_data()
+ * path instead.  Skips programming when the flash properties report the
+ * device is no longer unlocked.
+ *
+ * Returns 0 on success (or if already locked), negative errno otherwise.
+ */
+static int fwu_do_lockdown_v5v6(void)
+{
+    int retval;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#ifdef SYNA_TDDI
+    unsigned char *img_ld;
+
+    img_ld = (unsigned char *)fwu->img.lockdown.data;
+    if (fwu->has_lockdown_data) {
+        /* TDDI path: write lockdown block from the image and return */
+        retval = set_tddi_lockdown_data(img_ld,
+                LOCKDOWN_SIZE);
+        if (retval < 0)
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to write lockdown data\n",
+                    __func__);
+        return retval;
+    }
+#endif
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        return retval;
+
+    /* Re-read flash properties to get the current "unlocked" bit */
+    retval = synaptics_rmi4_reg_read(rmi4_data,
+            fwu->f34_fd.query_base_addr + fwu->off.properties,
+            fwu->flash_properties.data,
+            sizeof(fwu->flash_properties.data));
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to read flash properties\n",
+                __func__);
+        return retval;
+    }
+
+    if (fwu->flash_properties.unlocked == 0) {
+        dev_info(rmi4_data->pdev->dev.parent,
+                "%s: Device already locked down\n",
+                __func__);
+        return 0;
+    }
+
+    retval = fwu_write_lockdown();
+    if (retval < 0)
+        return retval;
+
+    pr_notice("%s: Lockdown programmed\n", __func__);
+
+    return retval;
+}
+
+#ifdef F51_DISCRETE_FORCE
+/*
+ * Restore previously saved F51 force-sensing calibration data into the UI
+ * configuration area: read back the current UI config, splice the saved
+ * calibration bytes in at fwu->cal_data_off, fix up the 4-byte trailing
+ * checksum, then erase and rewrite the configuration.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_do_restore_f51_cal_data(void)
+{
+    int retval;
+    unsigned char checksum_array[4];
+    unsigned short block_count;
+    unsigned long checksum;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    block_count = fwu->blkcount.ui_config;
+    fwu->config_size = fwu->block_size * block_count;
+    fwu->config_area = UI_CONFIG_AREA;
+
+    retval = fwu_allocate_read_config_buf(fwu->config_size);
+    if (retval < 0)
+        return retval;
+
+    retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+    if (retval < 0)
+        return retval;
+
+    /* Splice the saved calibration bytes into the config image */
+    retval = secure_memcpy(&fwu->read_config_buf[fwu->cal_data_off],
+            fwu->cal_data_size, fwu->cal_data,
+            fwu->cal_data_buf_size, fwu->cal_data_size);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to restore calibration data\n",
+                __func__);
+        return retval;
+    }
+
+    /* Recompute the checksum over everything but the 4-byte trailer */
+    calculate_checksum((unsigned short *)fwu->read_config_buf,
+            ((fwu->config_size - 4) / 2),
+            &checksum);
+
+    convert_to_little_endian(checksum_array, checksum);
+
+    fwu->read_config_buf[fwu->config_size - 4] = checksum_array[0];
+    fwu->read_config_buf[fwu->config_size - 3] = checksum_array[1];
+    fwu->read_config_buf[fwu->config_size - 2] = checksum_array[2];
+    fwu->read_config_buf[fwu->config_size - 1] = checksum_array[3];
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        return retval;
+
+    fwu->config_area = UI_CONFIG_AREA;
+    fwu->config_data = fwu->read_config_buf;
+    fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+    retval = fwu_erase_configuration();
+    if (retval < 0)
+        return retval;
+
+    retval = fwu_write_configuration();
+    if (retval < 0)
+        return retval;
+
+    return 0;
+}
+#endif
+
+/*
+ * Flash the guest-code partition from the staged firmware image.
+ * Validates that both device and image support guest code, then enters
+ * flash programming mode, erases and rewrites the partition, and resets
+ * the device on the way out (including error paths).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_write_guest_code(void)
+{
+    int retval;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    retval = fwu_parse_image_info();
+    if (retval < 0)
+        return -EINVAL;
+
+    if (!fwu->has_guest_code) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Guest code not supported\n",
+                __func__);
+        return -EINVAL;
+    }
+
+    if (!fwu->img.contains_guest_code) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: No guest code in firmware image\n",
+                __func__);
+        return -EINVAL;
+    }
+
+    if (rmi4_data->sensor_sleep) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Sensor sleeping\n",
+                __func__);
+        return -ENODEV;
+    }
+
+    /* Keep the device awake for the whole flash sequence */
+    rmi4_data->stay_awake = true;
+
+    mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+    pr_notice("%s: Start of write guest code process\n", __func__);
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        goto exit;
+
+    retval = fwu_check_guest_code_size();
+    if (retval < 0)
+        goto exit;
+
+    retval = fwu_erase_guest_code();
+    if (retval < 0)
+        goto exit;
+
+    retval = fwu_write_guest_code();
+    if (retval < 0)
+        goto exit;
+
+    pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+    /* Reset leaves flash programming mode on success and on failure */
+    rmi4_data->reset_device(rmi4_data, false);
+
+    pr_notice("%s: End of write guest code process\n", __func__);
+
+    mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+    rmi4_data->stay_awake = false;
+
+    return retval;
+}
+
+/*
+ * Write the configuration area selected by fwu->config_area (UI, display
+ * or permanent) from the staged firmware image.  UI config additionally
+ * requires the device and image firmware IDs to match.  The permanent
+ * config area is written without a prior erase.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_write_config(void)
+{
+    int retval;
+    unsigned short config_area;
+    unsigned int device_fw_id;
+    unsigned int image_fw_id;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    retval = fwu_parse_image_info();
+    if (retval < 0)
+        return -EINVAL;
+
+    /* Validate that the device and the image support the target area */
+    switch (fwu->config_area) {
+    case UI_CONFIG_AREA:
+        device_fw_id = rmi4_data->firmware_id;
+        retval = fwu_get_image_firmware_id(&image_fw_id);
+        if (retval < 0)
+            return retval;
+        if (device_fw_id != image_fw_id) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Device and image firmware IDs don't match\n",
+                    __func__);
+            return -EINVAL;
+        }
+        retval = fwu_check_ui_configuration_size();
+        if (retval < 0)
+            return retval;
+        break;
+    case DP_CONFIG_AREA:
+        if (!fwu->flash_properties.has_disp_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Display configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        if (!fwu->img.contains_disp_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: No display configuration in firmware image\n",
+                    __func__);
+            return -EINVAL;
+        }
+        retval = fwu_check_dp_configuration_size();
+        if (retval < 0)
+            return retval;
+        break;
+    case PM_CONFIG_AREA:
+        if (!fwu->flash_properties.has_pm_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Permanent configuration not supported\n",
+                    __func__);
+            return -EINVAL;
+        }
+        if (!fwu->img.contains_perm_config) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: No permanent configuration in firmware image\n",
+                    __func__);
+            return -EINVAL;
+        }
+        retval = fwu_check_pm_configuration_size();
+        if (retval < 0)
+            return retval;
+        break;
+    default:
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Configuration not supported\n",
+                __func__);
+        return -EINVAL;
+    }
+
+    if (rmi4_data->sensor_sleep) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Sensor sleeping\n",
+                __func__);
+        return -ENODEV;
+    }
+
+    rmi4_data->stay_awake = true;
+
+    mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+    pr_notice("%s: Start of write config process\n", __func__);
+
+    /* fwu_enter_flash_prog() clobbers config_area; save and restore it */
+    config_area = fwu->config_area;
+
+    retval = fwu_enter_flash_prog();
+    if (retval < 0)
+        goto exit;
+
+    fwu->config_area = config_area;
+
+    /* The permanent config area is written in place, without erase */
+    if (fwu->config_area != PM_CONFIG_AREA) {
+        retval = fwu_erase_configuration();
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to erase config\n",
+                    __func__);
+            goto exit;
+        }
+    }
+
+    switch (fwu->config_area) {
+    case UI_CONFIG_AREA:
+        retval = fwu_write_ui_configuration();
+        if (retval < 0)
+            goto exit;
+        break;
+    case DP_CONFIG_AREA:
+        retval = fwu_write_dp_configuration();
+        if (retval < 0)
+            goto exit;
+        break;
+    case PM_CONFIG_AREA:
+        retval = fwu_write_pm_configuration();
+        if (retval < 0)
+            goto exit;
+        break;
+    }
+
+    pr_notice("%s: Config written\n", __func__);
+
+exit:
+    /* UI config changes require a full rebuild; others a plain reset */
+    switch (fwu->config_area) {
+    case UI_CONFIG_AREA:
+        rmi4_data->reset_device(rmi4_data, true);
+        break;
+    case DP_CONFIG_AREA:
+    case PM_CONFIG_AREA:
+        rmi4_data->reset_device(rmi4_data, false);
+        break;
+    }
+
+    pr_notice("%s: End of write config process\n", __func__);
+
+    mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+    rmi4_data->stay_awake = false;
+
+    return retval;
+}
+
+/*
+ * Top-level reflash entry point.  Loads the firmware image (from
+ * fwu->image if staged, otherwise via request_firmware()), performs
+ * compatibility checks against the device, asks fwu_go_nogo() which
+ * areas need reflashing, programs them, optionally applies lockdown,
+ * and resets/rebuilds the device as required.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_reflash(void)
+{
+    int retval = 0;
+    enum flash_area flash_area;
+    bool do_rebuild = false;
+    const struct firmware *fw_entry = NULL;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (rmi4_data->sensor_sleep) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Sensor sleeping\n",
+                __func__);
+        return -ENODEV;
+    }
+
+    rmi4_data->stay_awake = true;
+
+    mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+    pr_notice("%s: Start of reflash process\n", __func__);
+
+    /* No caller-staged image: load the default image from the FS */
+    if (fwu->image == NULL) {
+        retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+                FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+                sizeof(FW_IMAGE_NAME));
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to copy image file name\n",
+                    __func__);
+            goto exit;
+        }
+        dev_dbg(rmi4_data->pdev->dev.parent,
+                "%s: Requesting firmware image %s\n",
+                __func__, fwu->image_name);
+
+        retval = request_firmware(&fw_entry, fwu->image_name,
+                rmi4_data->pdev->dev.parent);
+        if (retval != 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Firmware image %s not available\n",
+                    __func__, fwu->image_name);
+            retval = -EINVAL;
+            goto exit;
+        }
+
+        dev_dbg(rmi4_data->pdev->dev.parent,
+                "%s: Firmware image size = %d\n",
+                __func__, (unsigned int)fw_entry->size);
+
+        fwu->image = fw_entry->data;
+    }
+
+    retval = fwu_parse_image_info();
+    if (retval < 0)
+        goto exit;
+
+    /* Image must match the device's flash geometry and bootloader */
+    if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Flash size mismatch\n",
+                __func__);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (fwu->bl_version != fwu->img.bl_version) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Bootloader version mismatch\n",
+                __func__);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    retval = fwu_read_flash_status();
+    if (retval < 0)
+        goto exit;
+
+    if (fwu->in_bl_mode) {
+        fwu->bl_mode_device = true;
+        dev_info(rmi4_data->pdev->dev.parent,
+                "%s: Device in bootloader mode\n",
+                __func__);
+    } else {
+        fwu->bl_mode_device = false;
+    }
+
+    /* Decide which areas (if any) need to be reflashed */
+    flash_area = fwu_go_nogo();
+
+    if (flash_area != NONE) {
+        retval = fwu_enter_flash_prog();
+        if (retval < 0) {
+            rmi4_data->reset_device(rmi4_data, false);
+            goto exit;
+        }
+    }
+
+#ifdef F51_DISCRETE_FORCE
+    /* Save F51 calibration data before flashing wipes the UI config */
+    if (flash_area != NONE && !fwu->bl_mode_device) {
+        fwu->config_size = fwu->block_size * fwu->blkcount.ui_config;
+        fwu->config_area = UI_CONFIG_AREA;
+
+        retval = fwu_allocate_read_config_buf(fwu->config_size);
+        if (retval < 0) {
+            rmi4_data->reset_device(rmi4_data, false);
+            goto exit;
+        }
+
+        retval = fwu_read_f34_blocks(fwu->blkcount.ui_config,
+                CMD_READ_CONFIG);
+        if (retval < 0) {
+            rmi4_data->reset_device(rmi4_data, false);
+            goto exit;
+        }
+
+        retval = secure_memcpy(fwu->cal_data, fwu->cal_data_buf_size,
+                &fwu->read_config_buf[fwu->cal_data_off],
+                fwu->cal_data_size, fwu->cal_data_size);
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to save calibration data\n",
+                    __func__);
+            rmi4_data->reset_device(rmi4_data, false);
+            goto exit;
+        }
+    }
+#endif
+
+    switch (flash_area) {
+    case UI_FIRMWARE:
+        do_rebuild = true;
+        retval = fwu_do_reflash();
+#ifdef F51_DISCRETE_FORCE
+        if (retval < 0)
+            break;
+
+        if (fwu->has_utility_param || fwu->img.contains_utility_param)
+            break;
+
+        rmi4_data->reset_device(rmi4_data, false);
+
+        if (fwu->bl_mode_device || fwu->in_bl_mode) {
+            dev_info(rmi4_data->pdev->dev.parent,
+                    "%s: Device in bootloader mode, skipping calibration data restoration\n",
+                    __func__);
+            break;
+        }
+
+        retval = fwu_do_restore_f51_cal_data();
+#endif
+        break;
+    case UI_CONFIG:
+        do_rebuild = true;
+        retval = fwu_check_ui_configuration_size();
+        if (retval < 0)
+            break;
+        fwu->config_area = UI_CONFIG_AREA;
+        retval = fwu_erase_configuration();
+        if (retval < 0)
+            break;
+        retval = fwu_write_ui_configuration();
+#ifdef F51_DISCRETE_FORCE
+        if (retval < 0)
+            break;
+
+        if (fwu->has_utility_param)
+            break;
+
+        retval = fwu_do_restore_f51_cal_data();
+#endif
+        break;
+    case NONE:
+    default:
+        break;
+    }
+
+    if (retval < 0) {
+        do_rebuild = false;
+        rmi4_data->reset_device(rmi4_data, false);
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to do reflash\n",
+                __func__);
+        goto exit;
+    }
+
+    /* Optional lockdown, dispatched per bootloader generation */
+    if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+        switch (fwu->bl_version) {
+        case BL_V5:
+        case BL_V6:
+            retval = fwu_do_lockdown_v5v6();
+            if (retval < 0) {
+                dev_err(rmi4_data->pdev->dev.parent,
+                        "%s: Failed to do lockdown\n",
+                        __func__);
+            }
+            rmi4_data->reset_device(rmi4_data, false);
+            break;
+        case BL_V7:
+        case BL_V8:
+            retval = fwu_do_lockdown_v7();
+            if (retval < 0) {
+                dev_err(rmi4_data->pdev->dev.parent,
+                        "%s: Failed to do lockdown\n",
+                        __func__);
+            }
+            rmi4_data->reset_device(rmi4_data, false);
+            break;
+        default:
+            break;
+        }
+    }
+
+exit:
+    if (fw_entry)
+        release_firmware(fw_entry);
+
+    /* Successful UI firmware/config updates need a full rebuild */
+    if (do_rebuild)
+        rmi4_data->reset_device(rmi4_data, true);
+
+    pr_notice("%s: End of reflash process\n", __func__);
+
+    mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+    rmi4_data->stay_awake = false;
+
+    return retval;
+}
+
+/*
+ * Read the F$35 error-code register and report failure if the low five
+ * bits are non-zero.  Returns 0 when the controller reports no error.
+ */
+static int fwu_recovery_check_status(void)
+{
+    int rc;
+    unsigned char err_code;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    rc = synaptics_rmi4_reg_read(rmi4_data,
+            fwu->f35_fd.data_base_addr + F35_ERROR_CODE_OFFSET,
+            &err_code,
+            1);
+    if (rc < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to read status\n",
+                __func__);
+        return rc;
+    }
+
+    err_code &= MASK_5BIT;
+    if (err_code != 0x00) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Recovery mode status = %d\n",
+                __func__, err_code);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * Poll for completion of the F$35 erase-all operation.  The outer loop
+ * re-issues the "check" command and then waits for the command bit to
+ * self-clear (inner loop) and for the flash-status busy bit to clear.
+ * Both loops share one timeout budget of F35_ERASE_ALL_WAIT_MS total,
+ * consumed in 20 ms steps.
+ *
+ * Returns 0 on completion, -ETIMEDOUT on timeout, or a bus error code.
+ */
+static int fwu_recovery_erase_completion(void)
+{
+    int retval;
+    unsigned char data_base;
+    unsigned char command;
+    unsigned char status;
+    unsigned int timeout = F35_ERASE_ALL_WAIT_MS / 20;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    data_base = fwu->f35_fd.data_base_addr;
+
+    do {
+        command = 0x01;
+        retval = synaptics_rmi4_reg_write(rmi4_data,
+                fwu->f35_fd.cmd_base_addr,
+                &command,
+                sizeof(command));
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to issue command\n",
+                    __func__);
+            return retval;
+        }
+
+        /* Wait for the controller to clear the command bit */
+        do {
+            retval = synaptics_rmi4_reg_read(rmi4_data,
+                    fwu->f35_fd.cmd_base_addr,
+                    &command,
+                    sizeof(command));
+            if (retval < 0) {
+                dev_err(rmi4_data->pdev->dev.parent,
+                        "%s: Failed to read command status\n",
+                        __func__);
+                return retval;
+            }
+
+            if ((command & 0x01) == 0x00)
+                break;
+
+            msleep(20);
+            timeout--;
+        } while (timeout > 0);
+
+        if (timeout == 0)
+            goto exit;
+
+        retval = synaptics_rmi4_reg_read(rmi4_data,
+                data_base + F35_FLASH_STATUS_OFFSET,
+                &status,
+                sizeof(status));
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to read flash status\n",
+                    __func__);
+            return retval;
+        }
+
+        /* Busy bit cleared: erase has completed */
+        if ((status & 0x01) == 0x00)
+            break;
+
+        msleep(20);
+        timeout--;
+    } while (timeout > 0);
+
+exit:
+    if (timeout == 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Timed out waiting for flash erase completion\n",
+                __func__);
+        return -ETIMEDOUT;
+    }
+
+    return 0;
+}
+
+/*
+ * Issue the F$35 erase-all command, wait for it to finish, then verify
+ * the recovery status register.  Returns 0 on success or a negative
+ * errno.
+ */
+static int fwu_recovery_erase_all(void)
+{
+    int rc;
+    unsigned char cmd = CMD_F35_ERASE_ALL;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    rc = synaptics_rmi4_reg_write(rmi4_data,
+            fwu->f35_fd.ctrl_base_addr + F35_CHUNK_COMMAND_OFFSET,
+            &cmd,
+            sizeof(cmd));
+    if (rc < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to issue erase all command\n",
+                __func__);
+        return rc;
+    }
+
+    /*
+     * Poll for completion when a command register exists; otherwise
+     * fall back to a fixed worst-case delay.
+     */
+    if (fwu->f35_fd.cmd_base_addr) {
+        rc = fwu_recovery_erase_completion();
+        if (rc < 0)
+            return rc;
+    } else {
+        msleep(F35_ERASE_ALL_WAIT_MS);
+    }
+
+    rc = fwu_recovery_check_status();
+    if (rc < 0)
+        return rc;
+
+    return 0;
+}
+
+/*
+ * Stream the staged image to the device in F35_CHUNK_SIZE pieces.
+ * Each transfer is F35_CHUNK_SIZE data bytes plus one trailing command
+ * byte (CMD_F35_WRITE_CHUNK); a short final chunk is zero-padded.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_write_chunk(void)
+{
+    int retval;
+    unsigned char ctrl_base;
+    unsigned char chunk_number[] = {0, 0};
+    unsigned char chunk_spare;
+    unsigned char chunk_size;
+    unsigned char buf[F35_CHUNK_SIZE + 1];
+    unsigned short chunk;
+    unsigned short chunk_total;
+    unsigned short bytes_written = 0;
+    unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+    /* Start writing from chunk number 0 */
+    retval = synaptics_rmi4_reg_write(rmi4_data,
+            ctrl_base + F35_CHUNK_NUM_LSB_OFFSET,
+            chunk_number,
+            sizeof(chunk_number));
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to write chunk number\n",
+                __func__);
+        return retval;
+    }
+
+    /* Last byte of every transfer carries the write-chunk command */
+    buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+    chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+    chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+    if (chunk_spare)
+        chunk_total++;
+
+    for (chunk = 0; chunk < chunk_total; chunk++) {
+        if (chunk_spare && chunk == chunk_total - 1)
+            chunk_size = chunk_spare;
+        else
+            chunk_size = F35_CHUNK_SIZE;
+
+        /* Zero only the data bytes; keep the command byte intact */
+        memset(buf, 0x00, F35_CHUNK_SIZE);
+        secure_memcpy(buf, sizeof(buf), chunk_ptr,
+                fwu->image_size - bytes_written,
+                chunk_size);
+
+        retval = synaptics_rmi4_reg_write(rmi4_data,
+                ctrl_base + F35_CHUNK_DATA_OFFSET,
+                buf,
+                sizeof(buf));
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to write chunk data (chunk %d)\n",
+                    __func__, chunk);
+            return retval;
+        }
+        chunk_ptr += chunk_size;
+        bytes_written += chunk_size;
+    }
+
+    retval = fwu_recovery_check_status();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to write chunk data\n",
+                __func__);
+        return retval;
+    }
+
+    return 0;
+}
+
+/*
+ * Issue the F$35 reset command and give the controller time to come
+ * back up.  Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_reset(void)
+{
+    int rc;
+    unsigned char cmd = CMD_F35_RESET;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    rc = synaptics_rmi4_reg_write(rmi4_data,
+            fwu->f35_fd.ctrl_base_addr + F35_CHUNK_COMMAND_OFFSET,
+            &cmd,
+            sizeof(cmd));
+    if (rc < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to issue reset command\n",
+                __func__);
+        return rc;
+    }
+
+    msleep(F35_RESET_WAIT_MS);
+
+    return 0;
+}
+
+/*
+ * Recover a device stuck in microbootloader (F$35) mode: load the ihex
+ * image (from fwu->image if staged, otherwise via request_firmware()),
+ * erase the external flash, stream the image in chunks, and reset.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_recovery(void)
+{
+    int retval;
+    const struct firmware *fw_entry = NULL;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (rmi4_data->sensor_sleep) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Sensor sleeping\n",
+                __func__);
+        return -ENODEV;
+    }
+
+    rmi4_data->stay_awake = true;
+
+    mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+    pr_notice("%s: Start of recovery process\n", __func__);
+
+    /* No caller-staged image: load the default ihex from the FS */
+    if (fwu->image == NULL) {
+        retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+                FW_IHEX_NAME, sizeof(FW_IHEX_NAME),
+                sizeof(FW_IHEX_NAME));
+        if (retval < 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Failed to copy ihex file name\n",
+                    __func__);
+            goto exit;
+        }
+        dev_dbg(rmi4_data->pdev->dev.parent,
+                "%s: Requesting firmware ihex %s\n",
+                __func__, fwu->image_name);
+
+        retval = request_firmware(&fw_entry, fwu->image_name,
+                rmi4_data->pdev->dev.parent);
+        if (retval != 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Firmware ihex %s not available\n",
+                    __func__, fwu->image_name);
+            retval = -EINVAL;
+            goto exit;
+        }
+
+        dev_dbg(rmi4_data->pdev->dev.parent,
+                "%s: Firmware image size = %d\n",
+                __func__, (unsigned int)fw_entry->size);
+
+        fwu->image = fw_entry->data;
+        fwu->image_size = fw_entry->size;
+    }
+
+    /* Interrupts must stay off while raw flash traffic is in flight */
+    retval = rmi4_data->irq_enable(rmi4_data, false, false);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to disable interrupt\n",
+                __func__);
+        goto exit;
+    }
+
+    retval = fwu_recovery_erase_all();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to do erase all in recovery mode\n",
+                __func__);
+        goto exit;
+    }
+
+    pr_notice("%s: External flash erased\n", __func__);
+
+    retval = fwu_recovery_write_chunk();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to write chunk data in recovery mode\n",
+                __func__);
+        goto exit;
+    }
+
+    pr_notice("%s: Chunk data programmed\n", __func__);
+
+    retval = fwu_recovery_reset();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to reset device in recovery mode\n",
+                __func__);
+        goto exit;
+    }
+
+    pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+    /* Full reset + rebuild brings the recovered device back online */
+    rmi4_data->reset_device(rmi4_data, true);
+
+    retval = 0;
+
+exit:
+    if (fw_entry)
+        release_firmware(fw_entry);
+
+    pr_notice("%s: End of recovery process\n", __func__);
+
+    mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+    rmi4_data->stay_awake = false;
+
+    return retval;
+}
+
+/*
+ * Public entry point for firmware update.  @fw_data may be NULL, in
+ * which case the default image is loaded from the filesystem by
+ * fwu_start_reflash().  A device stuck in microbootloader mode is
+ * recovered first.  Returns 0 on success or a negative errno.
+ */
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+    int rc;
+
+    if (!fwu || !fwu->initialized)
+        return -ENODEV;
+
+    if (fwu->in_ub_mode) {
+        fwu->image = NULL;
+        rc = fwu_start_recovery();
+        if (rc < 0)
+            return rc;
+    }
+
+    fwu->image = fw_data;
+    rc = fwu_start_reflash();
+    fwu->image = NULL;
+
+    return rc;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+/*
+ * Deferred-work handler that runs the firmware update once at startup.
+ * Optionally waits for the framebuffer to become ready first.
+ *
+ * Cleanup: the run-once guard now uses bool instead of an unsigned char
+ * flag, and the redundant trailing "return;" in a void function is gone.
+ * NOTE(review): the guard is not atomic; this assumes the work item is
+ * only ever queued on one workqueue context — confirm at the queue site.
+ */
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+    static bool done;
+#ifdef WAIT_FOR_FB_READY
+    unsigned int timeout;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+    if (done)
+        return;
+    done = true;
+
+#ifdef WAIT_FOR_FB_READY
+    timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+    while (!rmi4_data->fb_ready) {
+        msleep(FB_READY_WAIT_MS);
+        timeout--;
+        if (timeout == 0) {
+            dev_err(rmi4_data->pdev->dev.parent,
+                    "%s: Timed out waiting for FB ready\n",
+                    __func__);
+            return;
+        }
+    }
+#endif
+
+    /* NULL image: let the updater load the default image itself */
+    synaptics_fw_updater(NULL);
+}
+#endif
+
+/*
+ * Sysfs bin-attribute read handler: copy the last-read configuration
+ * data (fwu->read_config_buf) into the caller's buffer.
+ * Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+        struct kobject *kobj, struct bin_attribute *attributes,
+        char *buf, loff_t pos, size_t count)
+{
+    int retval;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    /* Non-blocking: concurrent sysfs users get -EBUSY */
+    if (!mutex_trylock(&fwu_sysfs_mutex))
+        return -EBUSY;
+
+    if (count < fwu->config_size) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not enough space (%d bytes) in buffer\n",
+                __func__, (unsigned int)count);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    retval = secure_memcpy(buf, count, fwu->read_config_buf,
+            fwu->read_config_buf_size, fwu->config_size);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to copy config data\n",
+                __func__);
+        goto exit;
+    } else {
+        retval = fwu->config_size;
+    }
+
+exit:
+    mutex_unlock(&fwu_sysfs_mutex);
+    return retval;
+}
+
+/*
+ * Sysfs bin-attribute write handler: append the incoming bytes to the
+ * staged image buffer (fwu->ext_data_source) at fwu->data_pos.
+ * Returns the number of bytes consumed or a negative errno.
+ *
+ * NOTE(review): relies on secure_memcpy()'s destination-size check
+ * (fwu->image_size - fwu->data_pos) to reject overruns; assumes
+ * ext_data_source was allocated and image_size/data_pos initialized by
+ * the image-size store handler elsewhere in this file — confirm there.
+ */
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+        struct kobject *kobj, struct bin_attribute *attributes,
+        char *buf, loff_t pos, size_t count)
+{
+    int retval;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (!mutex_trylock(&fwu_sysfs_mutex))
+        return -EBUSY;
+
+    retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+            fwu->image_size - fwu->data_pos, buf, count, count);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to copy image data\n",
+                __func__);
+        goto exit;
+    } else {
+        retval = count;
+    }
+
+    fwu->data_pos += count;
+
+exit:
+    mutex_unlock(&fwu_sysfs_mutex);
+    return retval;
+}
+
+/*
+ * Sysfs handler: start the microbootloader recovery flow using the image
+ * previously staged through the bin attribute.
+ *
+ * Bug fix: kstrtouint() returns 0 on success or a negative errno — never
+ * 1.  The old "!= 1" check (a leftover sscanf() idiom) was therefore
+ * always true, so the store unconditionally failed with -EINVAL.
+ */
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t count)
+{
+    int retval;
+    unsigned int input;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (!mutex_trylock(&fwu_sysfs_mutex))
+        return -EBUSY;
+
+    if (kstrtouint(buf, 10, &input)) {
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (!fwu->in_ub_mode) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Not in microbootloader mode\n",
+                __func__);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (!fwu->ext_data_source) {
+        retval = -EINVAL;
+        goto exit;
+    } else {
+        fwu->image = fwu->ext_data_source;
+    }
+
+    retval = fwu_start_recovery();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to do recovery\n",
+                __func__);
+        goto exit;
+    }
+
+    retval = count;
+
+exit:
+    /* Staged image is single-use; always release it */
+    kfree(fwu->ext_data_source);
+    fwu->ext_data_source = NULL;
+    fwu->image = NULL;
+    mutex_unlock(&fwu_sysfs_mutex);
+    return retval;
+}
+
+/*
+ * Sysfs handler: reflash the device from the staged image.  The written
+ * value is a mode mask: LOCKDOWN bit requests lockdown, and the
+ * remainder must be NORMAL or FORCE (FORCE skips version checks).
+ *
+ * Bug fix: kstrtouint() returns 0 on success or a negative errno — never
+ * 1.  The old "!= 1" check (a leftover sscanf() idiom) was therefore
+ * always true, so the store unconditionally failed with -EINVAL.
+ */
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t count)
+{
+    int retval;
+    unsigned int input;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (!mutex_trylock(&fwu_sysfs_mutex))
+        return -EBUSY;
+
+    if (kstrtouint(buf, 10, &input)) {
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (fwu->in_ub_mode) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: In microbootloader mode\n",
+                __func__);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (!fwu->ext_data_source) {
+        retval = -EINVAL;
+        goto exit;
+    } else {
+        fwu->image = fwu->ext_data_source;
+    }
+
+    if (input & LOCKDOWN) {
+        fwu->do_lockdown = true;
+        input &= ~LOCKDOWN;
+    }
+
+    if ((input != NORMAL) && (input != FORCE)) {
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (input == FORCE)
+        fwu->force_update = true;
+
+    retval = synaptics_fw_updater(fwu->image);
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to do reflash\n",
+                __func__);
+        goto exit;
+    }
+
+    retval = count;
+
+exit:
+    /* Release the staged image and restore the compile-time defaults */
+    kfree(fwu->ext_data_source);
+    fwu->ext_data_source = NULL;
+    fwu->image = NULL;
+    fwu->force_update = FORCE_UPDATE;
+    fwu->do_lockdown = DO_LOCKDOWN;
+    mutex_unlock(&fwu_sysfs_mutex);
+    return retval;
+}
+
+/*
+ * Sysfs handler: write the staged configuration image to the area
+ * selected via the config_area attribute.  Only the value 1 triggers
+ * the write.
+ *
+ * Bug fix: kstrtouint() returns 0 on success or a negative errno — never
+ * 1.  The old "!= 1" check (a leftover sscanf() idiom) was therefore
+ * always true, so the store unconditionally failed with -EINVAL.
+ */
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t count)
+{
+    int retval;
+    unsigned int input;
+    struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+    if (!mutex_trylock(&fwu_sysfs_mutex))
+        return -EBUSY;
+
+    if (kstrtouint(buf, 10, &input)) {
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (input != 1) {
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (fwu->in_ub_mode) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: In microbootloader mode\n",
+                __func__);
+        retval = -EINVAL;
+        goto exit;
+    }
+
+    if (!fwu->ext_data_source) {
+        retval = -EINVAL;
+        goto exit;
+    } else {
+        fwu->image = fwu->ext_data_source;
+    }
+
+    retval = fwu_start_write_config();
+    if (retval < 0) {
+        dev_err(rmi4_data->pdev->dev.parent,
+                "%s: Failed to write config\n",
+                __func__);
+        goto exit;
+    }
+
+    retval = count;
+
+exit:
+    /* Staged image is single-use; always release it */
+    kfree(fwu->ext_data_source);
+    fwu->ext_data_source = NULL;
+    fwu->image = NULL;
+    mutex_unlock(&fwu_sysfs_mutex);
+    return retval;
+}
+
+/*
+ * Sysfs store: read back the device configuration into the driver's
+ * buffer (fwu_do_read_config). Only the value 1 triggers the read.
+ * Returns count on success or a negative errno.
+ */
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* kstrtouint() returns 0 on success (not an sscanf-style count) */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/* Sysfs store: select the config area subsequent operations act on. */
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int err;
+	unsigned long area;
+
+	err = sstrtoul(buf, 10, &area);
+	if (err)
+		return err;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+	fwu->config_area = area;
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return count;
+}
+
+/* Sysfs store: record the firmware image file name to load. */
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t result;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count) < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+		result = -EINVAL;
+	} else {
+		result = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return result;
+}
+
+/*
+ * Sysfs store: set the staged image size and (re)allocate the buffer
+ * that subsequent writes to the data attribute fill.
+ */
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t result;
+	unsigned long image_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	result = sstrtoul(buf, 10, &image_size);
+	if (result)
+		return result;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->image_size = image_size;
+	fwu->data_pos = 0;
+
+	/* discard any previously staged image before allocating anew */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (fwu->ext_data_source) {
+		result = count;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		result = -ENOMEM;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return result;
+}
+
+/* Sysfs show: flash block size in bytes. */
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of UI firmware blocks. */
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_firmware);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of UI configuration blocks. */
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of display configuration blocks. */
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.dp_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of permanent configuration blocks. */
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.pm_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of bootloader configuration blocks. */
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.bl_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of utility parameter blocks. */
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.utility_param);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Sysfs show: number of guest code blocks. */
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.guest_code);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/*
+ * Sysfs store: write the staged guest code image (fwu->ext_data_source)
+ * to the device. Only the value 1 triggers the write.
+ * Returns count on success or a negative errno.
+ */
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success (not an sscanf-style count) */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		/* no image has been staged through the data attribute */
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* the staged image is consumed regardless of outcome */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Sysfs show: read the TDDI lockdown data and print it as a hex string
+ * followed by a newline. Returns the number of bytes written to buf or
+ * a negative errno.
+ */
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short lockdown_data_size;
+	unsigned char *lockdown_data;
+	int retval = 0;
+	int i = 0;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	lockdown_data_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data) {
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -ENOMEM;
+	}
+
+	if (get_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Format directly into the sysfs page buffer; scnprintf bounds the
+	 * write, fixing the original 2-byte ld_val overflow and the strlcat
+	 * truncation at lockdown_data_size characters (hex output needs two
+	 * characters per byte).
+	 */
+	for (i = 0; i < lockdown_data_size; i++) {
+		retval += scnprintf(buf + retval, PAGE_SIZE - retval, "%02x",
+				lockdown_data[i]);
+	}
+	*(buf + retval) = '\n';
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval + 1;
+}
+
+/*
+ * Sysfs store: parse a hex string (trailing newline expected, hence the
+ * count - 1 arithmetic) into bytes and write it as TDDI lockdown data.
+ * Returns count on success or a negative errno.
+ */
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned short lockdown_data_size = (count - 1) / 2;
+	unsigned char *lockdown_data;
+	char temp[3];
+	int ld_val;
+	int i = 0;
+
+	/* hex string plus newline: total length must be odd */
+	if (count % 2 != 1)
+		return -EINVAL;
+
+	/*
+	 * Validate every character (the original checked buf[0] repeatedly
+	 * and its range test rejected 'a'-'f' while accepting chars > 'f').
+	 */
+	for (i = 0; i < (count - 1); i++) {
+		if (!((buf[i] >= '0' && buf[i] <= '9') ||
+				(buf[i] >= 'a' && buf[i] <= 'f') ||
+				(buf[i] >= 'A' && buf[i] <= 'F')))
+			return -EINVAL;
+	}
+
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data)
+		return -ENOMEM;
+
+	/* convert two hex digits at a time; temp is NUL-terminated */
+	temp[2] = '\0';
+	for (i = 0; i < lockdown_data_size; i++) {
+		memcpy(temp, (buf + 2 * i), 2);
+		if (kstrtoint(temp, 16, &ld_val) == 0)
+			*(lockdown_data + i) = ld_val & 0xff;
+	}
+
+	if (!mutex_trylock(&fwu_sysfs_mutex)) {
+		/* don't leak the buffer on lock contention */
+		kfree(lockdown_data);
+		return -EBUSY;
+	}
+
+	if (set_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return count;
+}
+#endif
+/*
+ * Attention (interrupt) callback: refresh the cached flash status when
+ * the asserted interrupt bits include this module's sources.
+ */
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (fwu && (fwu->intr_mask & intr_mask))
+		fwu_read_flash_status();
+}
+
+/*
+ * Module init callback: allocate the fwu handle, probe the PDT/F34
+ * registers, optionally schedule a startup firmware update, and create
+ * the sysfs interface. Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	/* NOTE(review): create_singlethread_workqueue() can return NULL;
+	 * a failure here would crash queue_work() — TODO add a check.
+	 */
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode) {
+		retval = fwu_f51_force_data_init();
+		if (retval < 0)
+			goto exit_free_mem;
+	}
+#endif
+
+	if (ENABLE_SYS_REFLASH == false)
+		return 0;
+
+	retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	return 0;
+
+exit_remove_attrs:
+	/*
+	 * attr_count is unsigned, so the original "attr_count >= 0"
+	 * countdown never terminated and underflowed past the array.
+	 * Unwind only the attributes that were actually created.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Module remove callback: stop the startup-update worker, tear down the
+ * sysfs interface, and free the fwu handle. Sysfs entries are removed
+ * BEFORE freeing fwu (the original freed first, leaving a window where
+ * an attribute callback could dereference freed memory).
+ */
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+	if (ENABLE_SYS_REFLASH != false) {
+		for (attr_count = 0; attr_count < ARRAY_SIZE(attrs);
+				attr_count++) {
+			sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+					&attrs[attr_count].attr);
+		}
+
+		sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj,
+				&dev_attr_data);
+	}
+
+#ifdef F51_DISCRETE_FORCE
+	kfree(fwu->cal_data);
+#endif
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	complete(&fwu_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: re-scan the PDT after a device reset and refresh the
+ * F34 query data; initializes the handle from scratch if it is missing.
+ */
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	if (fwu_scan_pdt() < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode)
+		fwu_f51_force_data_init();
+#endif
+}
+
+/* Expansion-function descriptor registering this firmware updater with
+ * the DSX core; only init/remove/reset/attn callbacks are implemented.
+ */
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+/* Register the firmware-update expansion function with the DSX core. */
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+	return 0;
+}
+
+/* Unregister from the DSX core and wait for remove() to finish. */
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+	wait_for_completion(&fwu_remove_complete);
+}
+
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
new file mode 100644
index 0000000..875670b
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
@@ -0,0 +1,2308 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+/* Gesture report codes delivered in F12 data (values per register map). */
+enum gesture_type {
+	DETECTION = 0x0f,
+	REGISTRATION = 0x10,
+};
+
+/*
+ * User-defined-gesture tuning parameters as laid out in the device's
+ * F12 ctrl_18 register block; the union overlays named LSB/MSB byte
+ * pairs on the raw 14-byte register image.
+ */
+struct udg_tuning {
+	union {
+		struct {
+			unsigned char maximum_number_of_templates;
+			unsigned char template_size;
+			unsigned char template_disp_lsb;
+			unsigned char template_disp_msb;
+			unsigned char rotation_inv_lsb;
+			unsigned char rotation_inv_msb;
+			unsigned char scale_inv_lsb;
+			unsigned char scale_inv_msb;
+			unsigned char thres_factor_lsb;
+			unsigned char thres_factor_msb;
+			unsigned char metric_thres_lsb;
+			unsigned char metric_thres_msb;
+			unsigned char inter_stroke_lsb;
+			unsigned char inter_stroke_msb;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/* Resolved register addresses for the F12 data/control/template regions
+ * used by the gesture engine (filled in during query parsing).
+ */
+struct udg_addr {
+	unsigned short data_4;
+	unsigned short ctrl_18;
+	unsigned short ctrl_20;
+	unsigned short ctrl_23;
+	unsigned short ctrl_27;
+	unsigned short ctrl_41;
+	unsigned short trace_x;
+	unsigned short trace_y;
+	unsigned short trace_segment;
+	unsigned short template_helper;
+	unsigned short template_data;
+	unsigned short template_flags;
+};
+
+/* F12 query 0 register image: capability bits plus template limits. */
+struct synaptics_rmi4_f12_query_0 {
+	union {
+		struct {
+			struct {
+				unsigned char has_register_descriptors:1;
+				unsigned char has_closed_cover:1;
+				unsigned char has_fast_glove_detect:1;
+				unsigned char has_dribble:1;
+				unsigned char has_4p4_jitter_filter_strength:1;
+				unsigned char f12_query0_s0_b5__7:3;
+			} __packed;
+			struct {
+				unsigned char max_num_templates:4;
+				unsigned char f12_query0_s1_b4__7:4;
+				unsigned char template_size_lsb;
+				unsigned char template_size_msb;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 query 5 register image: presence bits for control registers 0-47,
+ * used to compute the offset of each present control register.
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+		};
+		unsigned char data[7];
+	};
+};
+
+/* F12 query 8 register image: presence bits for data registers 0-23,
+ * used to compute the offset of each present data register.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 ctrl 41 register image: gesture registration enable/begin bits
+ * plus the target template index.
+ */
+struct synaptics_rmi4_f12_control_41 {
+	union {
+		struct {
+			unsigned char enable_registration:1;
+			unsigned char template_index:4;
+			unsigned char begin:1;
+			unsigned char f12_ctrl41_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Per-device state for the user-defined-gesture (UDG) engine: cached
+ * register addresses/offsets, scratch buffers, tuning parameters and
+ * the dedicated input device. A single instance is held in the
+ * file-scope 'udg' pointer.
+ */
+struct synaptics_rmi4_udg_handle {
+	atomic_t attn_event;
+	unsigned char intr_mask;
+	unsigned char report_flags;
+	unsigned char object_type_enable1;
+	unsigned char object_type_enable2;
+	unsigned char trace_size;
+	unsigned char template_index;
+	unsigned char max_num_templates;
+	unsigned char detection_score;
+	unsigned char detection_index;
+	unsigned char detection_status;
+	unsigned char registration_status;
+	unsigned char *ctrl_buf;
+	unsigned char *trace_data_buf;
+	unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+	unsigned char gestures_to_store;
+	unsigned char *storage_buf;
+	unsigned char valid_buf[2];
+#endif
+	unsigned short trace_data_buf_size;
+	unsigned short template_size;
+	unsigned short template_data_size;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short ctrl_18_sub10_off;
+	unsigned short ctrl_20_sub1_off;
+	unsigned short ctrl_23_sub3_off;
+	unsigned short ctrl_27_sub5_off;
+	struct input_dev *udg_dev;
+	struct kobject *tuning_dir;
+	struct udg_addr addr;
+	struct udg_tuning tuning;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Sysfs attributes exposed by the gesture engine; write-only entries
+ * (0220) pair a store handler with the shared show-error stub, and
+ * read-only entries (0444) do the inverse.
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(engine_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_engine_enable_store),
+	__ATTR(detection_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_detection_enable_store),
+	__ATTR(detection_score, 0444,
+			udg_sysfs_detection_score_show,
+			synaptics_rmi4_store_error),
+	__ATTR(detection_index, 0444,
+			udg_sysfs_detection_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(registration_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_enable_store),
+	__ATTR(registration_begin, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_begin_store),
+	__ATTR(registration_status, 0444,
+			udg_sysfs_registration_status_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_size, 0444,
+			udg_sysfs_template_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_max_index, 0444,
+			udg_sysfs_template_max_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_detection, 0444,
+			udg_sysfs_template_detection_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_index, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_index_store),
+	__ATTR(template_valid, 0664,
+			udg_sysfs_template_valid_show,
+			udg_sysfs_template_valid_store),
+	__ATTR(template_clear, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_clear_store),
+	__ATTR(trace_size, 0444,
+			udg_sysfs_trace_size_show,
+			synaptics_rmi4_store_error),
+};
+
+/* Binary sysfs attribute for reading/writing raw gesture template data. */
+static struct bin_attribute template_data = {
+	.attr = {
+		.name = "template_data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = udg_sysfs_template_data_show,
+	.write = udg_sysfs_template_data_store,
+};
+
+/* Binary sysfs attribute for reading the recorded gesture trace (RO). */
+static struct bin_attribute trace_data = {
+	.attr = {
+		.name = "trace_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = udg_sysfs_trace_data_show,
+	.write = NULL,
+};
+
+/* Read/write tuning attributes placed under the "tuning" sysfs dir. */
+static struct device_attribute params[] = {
+	__ATTR(template_displacement, 0664,
+			udg_sysfs_template_displacement_show,
+			udg_sysfs_template_displacement_store),
+	__ATTR(rotation_invariance, 0664,
+			udg_sysfs_rotation_invariance_show,
+			udg_sysfs_rotation_invariance_store),
+	__ATTR(scale_invariance, 0664,
+			udg_sysfs_scale_invariance_show,
+			udg_sysfs_scale_invariance_store),
+	__ATTR(threshold_factor, 0664,
+			udg_sysfs_threshold_factor_show,
+			udg_sysfs_threshold_factor_store),
+	__ATTR(match_metric_threshold, 0664,
+			udg_sysfs_match_metric_threshold_show,
+			udg_sysfs_match_metric_threshold_store),
+	__ATTR(max_inter_stroke_time, 0664,
+			udg_sysfs_max_inter_stroke_time_show,
+			udg_sysfs_max_inter_stroke_time_store),
+};
+
+/* Singleton UDG handle for this module instance. */
+static struct synaptics_rmi4_udg_handle *udg;
+
+/* Sub-register sizes (bytes) used to compute offsets within the
+ * packed F12 ctrl_18/20/23/27 register blocks.
+ */
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+/* Signalled by remove() so module exit can wait for teardown. */
+DECLARE_COMPLETION(udg_remove_complete);
+
+/*
+ * Sysfs store: enable (1) or disable (0) the gesture engine.
+ * Returns count on success or a negative errno.
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success (not an sscanf-style count) */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	retval = udg_engine_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs store: enable (1) or disable (0) gesture detection; also clears
+ * the cached detection status. Returns count or a negative errno.
+ */
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success (not an sscanf-style count) */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	udg->detection_status = 0;
+
+	retval = udg_detection_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Sysfs show: score of the most recent gesture detection. */
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int score = udg->detection_score;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", score);
+}
+
+/* Sysfs show: template index of the most recent gesture detection. */
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int index = udg->detection_index;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", index);
+}
+
+/*
+ * Sysfs store: enter (1) or leave (0) gesture registration mode. While
+ * registering, ctrl_23 is switched to finger-only reporting; on exit
+ * the saved object-type enables are restored. Returns count or errno.
+ */
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success (not an sscanf-style count) */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* report fingers only during registration */
+		udg->ctrl_buf[0] = 0;
+		udg->ctrl_buf[0] |= (1 << CTRL23_FINGER_REPORT_ENABLE_BIT);
+		if (udg->ctrl_23_sub3_off)
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* restore the object-type enables saved at init */
+		udg->ctrl_buf[0] = udg->object_type_enable1;
+		if (udg->ctrl_23_sub3_off) {
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+					udg->object_type_enable2;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.enable_registration = enable ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Start or stop the registration of the currently selected template.
+ *
+ * Accepts "1" (begin) or "0" (stop) and updates the begin flag in F12
+ * control 41.  Returns count on success or a negative errno on
+ * failure.
+ */
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool begin;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		begin = true;
+	else if (input == 0)
+		begin = false;
+	else
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.begin = begin ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the status code of the last template registration attempt. */
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int status = udg->registration_status;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", status);
+}
+
+/* Report the template size advertised by the firmware. */
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int size = udg->template_size;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", size);
+}
+
+/* Report the highest valid template slot index. */
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int max_index = udg->max_num_templates - 1;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_index);
+}
+
+/*
+ * Report the outcome of the last attention event.
+ *
+ * Prints "0" when no attention event is pending or when the status is
+ * neither DETECTION nor REGISTRATION; otherwise prints the status
+ * byte and caches the score/index/trace size for the companion sysfs
+ * attributes.  The status is one-shot: it is cleared once reported.
+ */
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	int attn_event;
+	unsigned char detection_status;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Consume the pending attention flag set by udg_report() */
+	attn_event = atomic_read(&udg->attn_event);
+	atomic_set(&udg->attn_event, 0);
+
+	if (attn_event == 0)
+		return snprintf(buf, PAGE_SIZE, "0\n");
+
+	if (udg->detection_status == 0) {
+		/* Status not cached by the IRQ path; fetch F12 data 4 now */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.data_4,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return retval;
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+	}
+
+	detection_status = udg->detection_status;
+	udg->detection_status = 0;	/* one-shot: clear after reporting */
+
+	switch (detection_status) {
+	case DETECTION:
+		udg->detection_score = rmi4_data->gesture_detection[1];
+		udg->detection_index = rmi4_data->gesture_detection[4];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	case REGISTRATION:
+		udg->registration_status = rmi4_data->gesture_detection[1];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	default:
+		return snprintf(buf, PAGE_SIZE, "0\n");
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+/* Select the template slot targeted by subsequent template operations. */
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long index;
+	int retval = sstrtoul(buf, 10, &index);
+
+	if (retval)
+		return retval;
+
+	retval = udg_set_index((unsigned char)index);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Report whether the currently selected template slot holds a valid
+ * (registered) gesture, as indicated by its bit in the template flags
+ * register.  Prints "1" or "0".
+ */
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* One flag bit per template slot, packed 8 per byte */
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+/*
+ * Set or clear the valid bit of the currently selected template slot
+ * in the template flags register.  Any nonzero input marks the slot
+ * valid; "0" marks it invalid.  Returns count on success or a
+ * negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &valid);
+	if (retval)
+		return retval;
+
+	/* Normalize any nonzero input to 1 */
+	if (valid > 0)
+		valid = 1;
+
+	/* One flag bit per template slot, packed 8 per byte */
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	if (valid)
+		template_flags[byte_num] |= (1 << offset);
+	else
+		template_flags[byte_num] &= ~(1 << offset);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+#ifdef STORE_GESTURES
+	/* Refresh the locally cached copy of the valid flags */
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/*
+ * Erase the currently selected template slot.
+ *
+ * Writing "1" zeroes the slot's template data and clears its valid
+ * bit; any other input is rejected with -EINVAL.  Returns count on
+ * success or a negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	const char cmd[] = {'0', 0};
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear template data\n",
+				__func__);
+		return retval;
+	}
+
+	/* Clear the valid flag by storing "0" through the valid attribute */
+	retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear valid bit\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* Report the number of points in the last recorded gesture trace. */
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int size = udg->trace_size;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", size);
+}
+
+/*
+ * Dump the trace of the last gesture as raw bytes: X coordinates
+ * (2 bytes per point), then Y coordinates (2 bytes per point), then
+ * segment bytes (1 byte per point) -- 5 bytes per trace point total.
+ * Returns the number of bytes copied or a negative errno on failure.
+ */
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short index = 0;
+	unsigned short trace_data_size;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	trace_data_size = udg->trace_size * 5;
+
+	if (trace_data_size == 0)
+		return -EINVAL;
+
+	if (count < trace_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	/* Grow the scratch buffer lazily; it is reused between reads */
+	if (udg->trace_data_buf_size < trace_data_size) {
+		if (udg->trace_data_buf_size)
+			kfree(udg->trace_data_buf);
+		udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+		if (!udg->trace_data_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for trace data buffer\n",
+					__func__);
+			udg->trace_data_buf_size = 0;
+			return -ENOMEM;
+		}
+		udg->trace_data_buf_size = trace_data_size;
+	}
+
+	/* X coordinates: 2 bytes per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_x,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace X data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	/* Y coordinates: 2 bytes per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_y,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace Y data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	/* Segment data: 1 byte per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_segment,
+			&udg->trace_data_buf[index],
+			udg->trace_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace segment data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->trace_data_buf,
+			udg->trace_data_buf_size, trace_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy trace data\n",
+				__func__);
+		return retval;
+	}
+
+	return trace_data_size;
+}
+
+/*
+ * Read the currently selected slot's template data from the device
+ * and copy it to the caller's buffer.  Returns the template data size
+ * or a negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (count < udg->template_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->template_data_buf,
+			udg->template_data_size, udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* Keep the local gesture store in sync with the device */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return udg->template_data_size;
+}
+
+/*
+ * Write caller-supplied template data into the currently selected
+ * slot.  Exactly count bytes are staged and written; secure_memcpy()
+ * rejects the copy if count exceeds the template buffer size.
+ * Returns count on success or a negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* Keep the local gesture store in sync with the device */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* Report the template displacement tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.template_disp_lsb |
+			((unsigned short)udg->tuning.template_disp_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the template displacement tuning value from a decimal string. */
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.template_disp_lsb = (unsigned char)value;
+	udg->tuning.template_disp_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the rotation invariance tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.rotation_inv_lsb |
+			((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the rotation invariance tuning value from a decimal string. */
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.rotation_inv_lsb = (unsigned char)value;
+	udg->tuning.rotation_inv_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the scale invariance tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.scale_inv_lsb |
+			((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the scale invariance tuning value from a decimal string. */
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.scale_inv_lsb = (unsigned char)value;
+	udg->tuning.scale_inv_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the threshold factor tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.thres_factor_lsb |
+			((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the threshold factor tuning value from a decimal string. */
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)value;
+	udg->tuning.thres_factor_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the match metric threshold tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.metric_thres_lsb |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the match metric threshold tuning value from a decimal string. */
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)value;
+	udg->tuning.metric_thres_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Report the maximum inter-stroke time tuning value (16-bit LE pair). */
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short value;
+	int retval = udg_read_tuning_params();
+
+	if (retval < 0)
+		return retval;
+
+	value = (unsigned short)udg->tuning.inter_stroke_lsb |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/* Set the maximum inter-stroke time tuning value from a decimal string. */
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long value;
+	int retval = sstrtoul(buf, 10, &value);
+
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)value;
+	udg->tuning.inter_stroke_msb = (unsigned char)(value >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Determine whether a subpacket of an F12 control register is present.
+ *
+ * Walks the F12 query 5 register (control register presence bitmap)
+ * and the variable-length query 6 register (per-register subpacket
+ * presence bytes) to locate @ctrlreg, then tests the presence bit of
+ * @subpacket.
+ *
+ * Returns 1 if the subpacket is present, 0 if it is not, or a
+ * negative errno on failure.
+ */
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Byte 0 of query 5 is a size byte; the rest is the bitmap */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	/* Check the control register's own presence bit in query 5 */
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* Skip the query 6 entries of all present registers before ours */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		/* A zero size byte is followed by a 2-byte extended size */
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* Presence bytes with bit 7 set mean more bytes follow */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	/* q6_index now points at our register's size byte; step past it */
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;	/* 7 presence bits per byte (bit 7 = more) */
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	/* Ran out of presence bytes before reaching the subpacket */
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/*
+ * Read the F12 control 18 register block and extract the tuning
+ * parameters located at subpacket 10 into udg->tuning.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	/* NOTE(review): secure_memcpy() result is not checked here */
+	secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+
+	return 0;
+}
+
+/*
+ * Patch the tuning parameters (subpacket 10) into the previously read
+ * copy of F12 control 18 and write the block back to the device.
+ *
+ * Assumes udg->ctrl_buf still holds the data from a prior
+ * udg_read_tuning_params() call.  Returns 0 on success or a negative
+ * errno on failure.
+ */
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* NOTE(review): secure_memcpy() result is not checked here */
+	secure_memcpy((unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Switch the F12 control 20 report flags between wakeup gesture mode
+ * (enable) and the flags captured at init time (disable).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Turn the user defined gesture engine on or off via its enable bit
+ * in F12 control 27 (read-modify-write).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	/* Both paths only differ in the bit operation applied */
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+	else
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Attention handler for user defined gesture events.
+ *
+ * Flags the event for the sysfs interface and, while the device is
+ * suspended, reads the detection status from F12 data 4 and emits a
+ * KEY_WAKEUP press/release pair when a gesture was detected.
+ */
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Let template_detection_show() know an event arrived */
+	atomic_set(&udg->attn_event, 1);
+
+	if (rmi4_data->suspend) {
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			/* Wake the system with a key press/release pair */
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Select the template slot used by subsequent template operations by
+ * writing the index into F12 control 41.
+ *
+ * Returns 0 on success, -EINVAL if the index is out of range, or a
+ * negative errno on register access failure.
+ */
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	/* Read-modify-write: preserve the other control 41 fields */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+/*
+ * Cache the template valid flags register into udg->valid_buf
+ * (STORE_GESTURES support).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Restore the template valid flags register from udg->valid_buf
+ * (STORE_GESTURES support).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Cache the template data of the given slot into the local storage
+ * buffer (STORE_GESTURES support).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Select the slot first; bail out on an out-of-range index */
+	retval = udg_set_index(index);
+	if (retval < 0)
+		return retval;
+
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Restore all stored gesture templates from the local storage buffer
+ * to the device (STORE_GESTURES support).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		/* Select the slot first; bail out on selection failure */
+		retval = udg_set_index(ii);
+		if (retval < 0)
+			return retval;
+
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Discover the F12 data/control registers used by the user defined
+ * gesture (UDG) feature, cache their addresses and subpacket offsets,
+ * and capture the default report flags and object type enable values.
+ *
+ * Returns a non-negative value on success, -ENODEV when required
+ * registers are missing, or a negative errno on register access
+ * failure.
+ */
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Query 7 holds the size of query 8 (data presence bitmap) */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Data 16..21 hold the UDG trace and template registers */
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		/* Offset of data 4 = number of present registers before it */
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		/* Data 16..21 are consecutive from here */
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/* Query 4 holds the size of query 5 (control presence bitmap) */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Each control register's offset = count of present ones before it */
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset+
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset+
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	/* Byte offset of subpacket 10 within control 18 (tuning data) */
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Byte offset of subpacket 1 within control 20 (report flags) */
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Byte offset of subpacket 3 within control 23 (object enables) */
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Subpacket 3 absent: treat control 23 as a single byte */
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	/* Byte offset of subpacket 5 within control 27 (UDG enable bit) */
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Query 0 holds the template count and per-template size */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	/* Store at most GESTURES_TO_STORE templates locally */
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	/* Save the default report flags for udg_detection_enable(false) */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	/* Save the default object type enables for registration disable */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table to locate F12, initialize the UDG
+ * registers, and enable the function's interrupt sources in F01.
+ *
+ * Returns 0 on success, -EINVAL if F12 is not found, or a negative
+ * errno on register access failure.
+ */
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries are scanned downward from PDT_START */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* An empty entry terminates this page */
+				break;
+			}
+
+			/* Count interrupt sources of preceding functions */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build F12's interrupt mask from its bit position in F01 */
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	/* F01 control 1 is the interrupt enable register */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Attention hook: called by the DSX core with the asserted interrupt
+ * sources; report gesture data when one of our F12 sources fired.
+ * No-op when the module was never initialized.
+ */
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!udg)
+		return;
+
+	if (udg->intr_mask & intr_mask)
+		udg_report();
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_udg_init() - create and register the gesture handle
+ *
+ * Allocates the global udg handle and its buffers, scans the PDT for
+ * F12, registers the KEY_WAKEUP input device, creates the sysfs bin
+ * files, attributes and tuning directory, then turns the gesture
+ * engine on.
+ *
+ * Returns 0 on success or a negative error code; on failure everything
+ * allocated so far is torn down and the global udg handle stays NULL.
+ *
+ * Fixes vs. the original:
+ *  - attr_count/param_count were unsigned char, so the count-down
+ *    cleanup loops ("x--; x >= 0") could never terminate (the
+ *    condition is always true for an unsigned type); they are now int.
+ *  - the tuning_dir failure path returned 0 (stale retval from
+ *    input_register_device), falsely reporting success; it now
+ *    returns -ENOMEM (kobject_create_and_add() returns NULL on
+ *    allocation failure).
+ */
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	int attr_count;
+	int param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/*
+	 * ctrl_buf must hold every F12_CTRL18 subpacket plus the tuning
+	 * block.  NOTE(review): size is unsigned char — assumes the sum
+	 * stays below 256; confirm against the register map.
+	 */
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	/* locates F12 and fills in template_data_size among others */
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	/* gestures are reported as a KEY_WAKEUP key event */
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+	/* error unwinding: each label undoes one step above it */
+exit_remove_params:
+	for (param_count--; param_count >= 0; param_count--) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	for (attr_count--; attr_count >= 0; attr_count--) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down everything created by synaptics_rmi4_udg_init() in reverse
+ * order (sysfs params/attrs, bin files, tuning kobject, input device,
+ * buffers), then signal udg_remove_complete so module exit can finish.
+ * Safe to call when the handle was never created.
+ */
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char count;
+
+	if (!udg)
+		goto exit;
+
+	for (count = 0; count < ARRAY_SIZE(params); count++) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[count].attr);
+	}
+
+	for (count = 0; count < ARRAY_SIZE(attrs); count++) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	kobject_put(udg->tuning_dir);
+
+	input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+	/* trace_data_buf is allocated lazily elsewhere; may be NULL here */
+	kfree(udg->trace_data_buf);
+	kfree(udg->ctrl_buf);
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	complete(&udg_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset hook: after a firmware reset the register map may have moved,
+ * so re-scan the PDT and re-enable the gesture engine.  If init never
+ * ran (udg == NULL), run it now instead.  Return values are not
+ * checked — this is best-effort recovery.  With STORE_GESTURES the
+ * previously captured templates are written back to the chip.
+ */
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg) {
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/*
+ * Reinit hook: like the reset hook but without a PDT re-scan (the
+ * register map is unchanged); just re-enable the engine and, with
+ * STORE_GESTURES, restore the stored templates.  Best-effort.
+ */
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/*
+ * Early-suspend hook: keep the controller powered (sleep disabled) and
+ * its IRQ armed as a wake-up source so a user-defined gesture can wake
+ * the system, then switch the firmware into gesture-detection mode.
+ */
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/*
+ * Suspend hook: identical to the early-suspend path — keep the device
+ * awake with its IRQ as a wake source and enter gesture-detection mode
+ * (covers the case where early suspend was not invoked).
+ */
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/* Resume hook: disarm the wake IRQ and leave gesture-detection mode. */
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/* Late-resume hook: same as resume (covers the late-resume callback). */
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/* Expansion-function hook table registered with the DSX core. */
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+/* Module entry point: register the gesture hooks with the DSX core. */
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+/*
+ * Module exit: deregister the hooks, then wait for the remove callback
+ * (which completes udg_remove_complete) before letting the module go.
+ */
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	wait_for_completion(&udg_remove_complete);
+
+	return;
+}
+
+/* Module registration and metadata. */
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
new file mode 100644
index 0000000..8776d4a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -0,0 +1,606 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+/* number of read messages per transfer (one address write + rd_msgs reads) */
+#define rd_msgs 1
+
+/* grow-on-demand write buffer shared by all writes; see *_alloc_buf() */
+static unsigned char *wr_buf;
+
+/* board data + bus ops handed to the DSX core via platform data */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* platform device spawned in probe; freed by its release callback */
+static struct platform_device *synaptics_dsx_i2c_device;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - read board configuration from the device tree
+ *
+ * Most properties are optional: when absent, numeric settings fall
+ * back to 0 or a -1 "not configured" sentinel and the button maps stay
+ * empty.  Returns 0 on success, or a negative error when a property
+ * that IS present cannot be read or a button-map allocation fails.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	/* power GPIO: if present, its active state is mandatory */
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	/* reset GPIO: if present, both on-state and active time are mandatory */
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	/*
+	 * NOTE(review): -1 is used as the "not set" sentinel and
+	 * synaptics_rmi4_i2c_check_addr() compares ub_i2c_addr == -1;
+	 * verify the field is a signed type in the header so that
+	 * comparison holds.
+	 */
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* 0D capacitive button key codes: one u32 per button */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/*
+	 * Virtual buttons: 5 u32s per button — presumably key code plus
+	 * bounding box; confirm against the core's consumer.
+	 */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Grow-on-demand scratch buffer for writes (register address byte plus
+ * payload).  The static buf_size tracks the current allocation across
+ * calls; the buffer only ever grows.
+ * NOTE(review): this runs before rmi4_io_ctrl_mutex is taken in
+ * synaptics_rmi4_i2c_write(), so concurrent writers could race on
+ * wr_buf/buf_size — confirm the core serializes register writes.
+ */
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (count > buf_size) {
+		if (buf_size)
+			kfree(wr_buf);
+		wr_buf = kzalloc(count, GFP_KERNEL);
+		if (!wr_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = count;
+	}
+
+	return 0;
+}
+
+/*
+ * Toggle the slave address between the normal device address and the
+ * microbootloader (UB) address; used during retries in case the chip
+ * is stuck in UB mode.  No-op when no UB address was configured
+ * (sentinel -1 from parse_dt()).
+ */
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	if (hw_if.board_data->ub_i2c_addr == -1)
+		return;
+
+	if (hw_if.board_data->i2c_addr == i2c->addr)
+		hw_if.board_data->i2c_addr = hw_if.board_data->ub_i2c_addr;
+	else
+		hw_if.board_data->i2c_addr = i2c->addr;
+
+	return;
+}
+
+/*
+ * Select the RMI4 register page (upper byte of @addr) by writing the
+ * page select register.  The current page is cached and the bus
+ * transaction skipped when it is already selected.  Halfway through
+ * the retry budget the slave address is toggled in case the chip sits
+ * at its microbootloader address.
+ *
+ * Returns PAGE_SELECT_LEN on success, 0 if all retries failed.
+ */
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	buf[0] = MASK_8BIT;	/* page select register lives at 0xFF */
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* halfway through: try the alternate (UB) address */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * Read @length bytes starting at RMI4 register @addr.
+ *
+ * One write message carrying the lower address byte is followed by one
+ * read message for the whole payload.  Up to SYN_I2C_RETRY_TIMES
+ * attempts are made, toggling the slave address halfway through the
+ * budget (microbootloader recovery).  Serialized by
+ * rmi4_io_ctrl_mutex.
+ *
+ * Returns @length on success or a negative error code.
+ * NOTE(review): i2c_msg.len is 16-bit, so @length above 65535 would be
+ * truncated — presumably callers never read that much; confirm.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf;
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned int remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[rd_msgs + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = (unsigned short)remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	/* with rd_msgs == 1 this loop performs one two-message transfer */
+	while (remaining_msgs) {
+		xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* halfway through: try the alternate (UB) address */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes to RMI4 register @addr.
+ *
+ * The payload is staged in the shared wr_buf behind a one-byte
+ * register address, then sent as a single write message with the same
+ * retry/address-toggle scheme as the read path.  Serialized by
+ * rmi4_io_ctrl_mutex (note: wr_buf is (re)allocated before the mutex
+ * is taken — see the NOTE on synaptics_rmi4_i2c_alloc_buf()).
+ *
+ * Returns @length on success or a negative error code.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		return retval;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = (unsigned short)(length + 1);
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		/* halfway through: try the alternate (UB) address */
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* Bus operations handed to the DSX core through hw_if. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Device release callback: frees the platform device allocated in probe. */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_i2c_probe() - bind the I2C client and spawn the DSX
+ * core platform device
+ *
+ * Gathers board data (from DT or platform data), fills in hw_if with
+ * the bus ops and slave address, and registers a platform device whose
+ * platform data is hw_if; the DSX core driver then binds to it.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * Fixes vs. the original: synaptics_dsx_i2c_device (plain kzalloc,
+ * freed only by the release callback) leaked on every error path after
+ * its allocation, and hw_if.board_data was dereferenced unchecked (it
+ * stays NULL when there is neither a DT node nor platform data).
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		/* devm allocations below are released with the client */
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		parse_dt(&client->dev, hw_if.board_data);
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	/* no DT node and no platform data leaves board_data NULL */
+	if (!hw_if.board_data) {
+		dev_err(&client->dev,
+				"%s: No board data available\n",
+				__func__);
+		retval = -EINVAL;
+		goto err_free_pdev;
+	}
+
+	hw_if.bus_access = &bus_access;
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/*
+		 * The device was initialized by the failed register
+		 * call; drop the reference so the release callback
+		 * frees it.
+		 */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_pdev:
+	/* not yet registered, so the release callback will not run */
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * Unbind: unregister the platform device; its release callback frees
+ * the allocation made in probe.
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* I2C device IDs for legacy (non-DT) matching. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device-tree compatible strings. */
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C transport driver registered by synaptics_rmi4_bus_init(). */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Called by the DSX core to register the I2C transport driver. */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+
+/*
+ * Tear down the I2C transport: free the shared write buffer and remove
+ * the driver.
+ * NOTE(review): wr_buf is not reset to NULL and the static buf_size in
+ * synaptics_rmi4_i2c_alloc_buf() keeps its old value; a second
+ * bus_init()/write within the same module lifetime would use a stale
+ * pointer — confirm callers only invoke this once at module unload.
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(wr_buf);
+
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
new file mode 100644
index 0000000..518b805
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
@@ -0,0 +1,692 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+/* sysfs attribute: hover_finger_en (0664) toggles hovering-finger reporting */
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+/*
+ * F12 query 5 register image: presence bits for control registers 0-23.
+ * The union lets the same bytes be read as a raw buffer (data[4]) for
+ * register I/O and as bitfields for offset computation in prox_reg_init().
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/*
+ * F12 query 8 register image: presence bits for data registers 0-7,
+ * readable either as raw bytes (data[2]) or as bitfields.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Hovering-finger data record as laid out in the F12 data register:
+ * status byte, 16-bit X/Y (LSB first), and Z (distance). proximity_data[]
+ * aliases the same 6 bytes for a single register read.
+ */
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];
+	};
+};
+
+/*
+ * Module-wide state for the proximity (hover) feature: F12 register base
+ * addresses discovered by prox_scan_pdt(), derived register addresses,
+ * the dedicated input device, and the backing finger-data buffer.
+ */
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;
+	bool hover_finger_en;
+	unsigned char intr_mask;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;
+	unsigned short hover_finger_data_addr;
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+/*
+ * prox_hover_finger_lift - report the hovering finger as gone
+ *
+ * Emits release events on the proximity input device and clears the
+ * presence flag.
+ */
+static void prox_hover_finger_lift(void)
+{
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 0);
+	input_sync(prox->prox_dev);
+	prox->hover_finger_present = false;
+
+	return;
+}
+
+/*
+ * prox_hover_finger_report - read and report hovering finger position
+ *
+ * Reads the F12 hovering-finger data record; if the object status is not
+ * F12_HOVERING_FINGER_STATUS any previously reported finger is lifted.
+ * Otherwise reports X/Y and an inverted Z (closer finger -> larger
+ * ABS_DISTANCE value, up to HOVER_Z_MAX).
+ */
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
+	/* Assemble 16-bit coordinates from LSB/MSB register pairs */
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	z = HOVER_Z_MAX - data->z;
+
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+
+	return;
+}
+
+/*
+ * prox_set_hover_finger_en - push prox->hover_finger_en to the device
+ *
+ * Read-modify-writes the F12 object report enable register, setting or
+ * clearing the HOVERING_FINGER_EN bit. Returns 0 on success or a
+ * negative error code from the register access.
+ */
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char object_report_enable;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		object_report_enable |= HOVERING_FINGER_EN;
+	else
+		object_report_enable &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * prox_set_params - declare axis ranges on the proximity input device
+ *
+ * X/Y limits come from the sensor maxima; distance is 0..HOVER_Z_MAX.
+ */
+static void prox_set_params(void)
+{
+	input_set_abs_params(prox->prox_dev, ABS_X, 0,
+			prox->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_Y, 0,
+			prox->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_DISTANCE, 0,
+			HOVER_Z_MAX, 0, 0);
+
+	return;
+}
+
+/*
+ * prox_reg_init - derive F12 sub-register addresses
+ *
+ * Reads F12 query 5/8 presence bitmaps and sums the presence bits to
+ * compute the offsets of control register 23 (object report enable) and
+ * data register 1 (hovering finger data) from their base addresses.
+ * Returns 0/positive on success, negative errno on register read failure.
+ */
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* ctrl 23 offset = number of present control registers before it */
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* data 1 offset = 1 if data register 0 is present, else 0 */
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+/*
+ * prox_scan_pdt - locate F12 in the Page Description Table
+ *
+ * Walks PDT entries (top-down, per page) until F12 is found, records its
+ * register base addresses, initializes the derived register addresses,
+ * and enables F12's interrupt source bits in F01 control 1.
+ * Returns 0 on success, -EINVAL if F12 is absent, or a register I/O error.
+ *
+ * NOTE(review): intr_count is accumulated only for entries scanned before
+ * F12 (the F12 case jumps out before the += line), which yields F12's
+ * interrupt bit offset — subtle but apparently intentional.
+ */
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* Empty entry: end of this page's PDT */
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build the interrupt mask for F12's interrupt sources */
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* sysfs read: current hover-finger enable state ("0"/"1") */
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!prox)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			prox->hover_finger_en);
+}
+
+/*
+ * sysfs write: enable (1) or disable (0) hovering finger reporting.
+ * Input is parsed base-16 for historical reasons; only 0 and 1 are valid.
+ */
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	/* Check the handle before dereferencing it for rmi4_data */
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * synaptics_rmi4_prox_hover_finger_en - in-kernel enable/disable API
+ *
+ * @enable: desired hovering-finger reporting state
+ *
+ * Returns 0 on success, -ENODEV if the proximity module is not
+ * initialized, or a register I/O error.
+ */
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	int retval;
+
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+/*
+ * Attention callback: report hover data when one of F12's interrupt
+ * source bits is set in the incoming interrupt status.
+ */
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!prox)
+		return;
+
+	if (prox->intr_mask & intr_mask)
+		prox_hover_finger_report();
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_prox_init - set up the proximity (hover) feature
+ *
+ * Allocates the handle and finger-data buffer, locates F12 via the PDT,
+ * enables hover reporting, registers a dedicated input device, and
+ * creates the sysfs attributes. All error paths unwind fully.
+ */
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		goto exit_free_finger_data; /* was "return retval": leaked prox and finger_data */
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	/*
+	 * attr_count is unsigned, so the original "for (attr_count--;
+	 * attr_count >= 0; attr_count--)" never terminated (underflow and
+	 * out-of-bounds access). Count down while strictly positive instead.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;
+
+exit_free_input_device:
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_prox_remove - tear down the proximity feature
+ *
+ * Removes sysfs attributes, unregisters the input device (which frees
+ * it), releases the handle, and signals module exit via the completion.
+ */
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: after a device reset, rediscover F12 (register layout
+ * may change with new firmware) and re-apply the hover enable setting.
+ * First-time initialization is performed if no handle exists yet.
+ */
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+
+	prox_scan_pdt();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+/* Reinit callback: drop any reported finger, re-apply hover enable */
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+/* Early-suspend callback: release any reported hovering finger */
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+/* Suspend callback: release any reported hovering finger */
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+/* Expansion-function descriptor registered with the DSX core */
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+/* Register the proximity expansion function with the DSX core */
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+
+	return 0;
+}
+
+/*
+ * Unregister and wait for the core to finish calling our remove hook
+ * before the module text goes away.
+ */
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+
+	wait_for_completion(&prox_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
new file mode 100644
index 0000000..61cf979
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1064 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+#define RMIDEV_MAJOR_NUM 0
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+/*
+ * Per-module state for the RMI register-access char device: device
+ * number, signal delivery target (pid/task), scratch buffer for
+ * user-space transfers, and the concurrent-reporting flag.
+ */
+struct rmidev_handle {
+	dev_t dev_no;
+	pid_t pid;
+	unsigned char intr_mask;
+	unsigned char *tmpbuf;
+	unsigned int tmpbuf_size;
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+	struct siginfo interrupt_signal;
+	struct siginfo terminate_signal;
+	struct task_struct *task;
+	void *data;
+	bool concurrent;
+};
+
+/* Per-char-device state: cdev, class, open refcount and file lock */
+struct rmidev_data {
+	int ref_count;
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;
+	struct rmidev_handle *rmi_dev;
+};
+
+/* sysfs binary attribute "data": raw register read/write interface */
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+/* sysfs control attributes for the rmidev debug interface */
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			synaptics_rmi4_store_error),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_term_store),
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+static int rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+DECLARE_COMPLETION(rmidev_remove_complete);
+
+/* IRQ handler while rmidev owns the interrupt: just poke attn_state */
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * rmidev_sysfs_irq_enable - take over / release the attention IRQ
+ *
+ * When enabling, clears pending interrupt status first, then installs
+ * rmidev_sysfs_irq as a threaded handler so user space can poll
+ * attn_state. When disabling, frees the IRQ. Serialized against the
+ * core driver's own IRQ management by rmi4_irq_enable_mutex.
+ */
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			goto exit;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+/*
+ * Binary-attribute read: fetch "count" bytes of RMI registers starting
+ * at offset "pos". In concurrent mode, a read of the F01 data base that
+ * includes the interrupt status byte also dispatches touch reporting to
+ * the matching function handlers so normal input events keep flowing.
+ */
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	/* buf[1] is interrupt status register 0 when reading from F01 data */
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+/*
+ * Binary-attribute write: write "count" bytes to RMI registers starting
+ * at offset "pos", bounded by REG_ADDR_LIMIT.
+ */
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return length;
+}
+
+/*
+ * sysfs write "1" to open: hand the attention IRQ over to rmidev so a
+ * user-space tool can drive the device directly; keeps the chip awake.
+ */
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	rmidev_sysfs_irq_enable(rmi4_data, true);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+/*
+ * sysfs write "1" to release: give the IRQ back to the core driver and
+ * reset the device to restore normal operation.
+ */
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+/* sysfs read: current level of the attention GPIO */
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int attn_state;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	attn_state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", attn_state);
+}
+
+/* sysfs read: PID currently registered for signal delivery (0 = none) */
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+/*
+ * sysfs write: register the PID of the user-space tool that should
+ * receive signals; 0 clears the registration.
+ *
+ * NOTE(review): the task_struct pointer is cached without taking a
+ * reference; if the process exits it becomes dangling — verify callers.
+ */
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/* sysfs write "1": send SIGTERM to the registered user-space tool */
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+/* sysfs read: interrupt mask as a two-digit hex value */
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+/* sysfs write: set the interrupt mask (decimal input, low byte kept) */
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+/* sysfs read: concurrent-reporting flag (0/1) */
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+/* sysfs write: any positive value enables concurrent touch reporting */
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style count */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+/*
+ * rmidev_allocate_buffer - grow the scratch transfer buffer
+ *
+ * Ensures tmpbuf can hold count+1 bytes, reallocating if needed.
+ * On allocation failure tmpbuf is left NULL and tmpbuf_size 0;
+ * callers must check the return value before using tmpbuf.
+ */
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ *
+ * Returns the new position on success or -EINVAL/-EBADF on error.
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	/* Positions must stay within the 16-bit RMI register map */
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	address = (unsigned short)(*f_pos);
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/*
+	 * Check the allocation result; if it fails, tmpbuf is NULL and the
+	 * register read below would dereference it.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto clean_up;
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	/* tmpbuf[1] is interrupt status register 0 of F01 */
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ *
+ * Returns the number of bytes written, or a negative errno.
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	/* Clamp the transfer so it never runs past the register space */
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/*
+	 * Check the allocation result; the previous code ignored it and
+	 * could copy into a NULL buffer.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto unlock;
+
+	/*
+	 * The previous code returned here while still holding file_mutex,
+	 * deadlocking every subsequent access; unlock via the exit path.
+	 */
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+unlock:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * Open the rmidev char device: disable the attention interrupt so that
+ * user space has exclusive register access, and allow only one opener
+ * at a time (ref_count limited to 1).
+ */
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Keep the controller awake while user space holds the device */
+	rmi4_data->stay_awake = true;
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/* NOTE(review): the interrupt is disabled before the ref_count
+	 * check, so a second (rejected) open still disables it — confirm
+	 * this is intentional. */
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	if (dev_data->ref_count < 1)
+		dev_data->ref_count++;
+	else
+		retval = -EACCES;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * Release the rmidev char device: drop the open reference and reset
+ * the controller to restore normal (interrupt-driven) operation.
+ * reset_device() is presumably what re-enables the attention interrupt
+ * disabled in rmidev_open() — verify against the core driver.
+ */
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/* Clamp at zero in case of unbalanced release calls */
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return 0;
+}
+
+/* File operations for the rmidev character device (/dev/rmi*) */
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+/*
+ * Tear down the char device created for dev_data: destroy the device
+ * node, delete the cdev and release its device-number region.
+ */
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	dev_t devno;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (!dev_data)
+		return;
+
+	devno = dev_data->main_dev.dev;
+
+	if (dev_data->device_class)
+		device_destroy(dev_data->device_class, devno);
+
+	cdev_del(&dev_data->main_dev);
+	unregister_chrdev_region(devno, 1);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: rmidev device removed\n",
+			__func__);
+}
+
+/*
+ * devnode callback: place the node under /dev/rmi/ and set its mode.
+ * NOTE(review): 0666 makes the raw register interface world-read/write;
+ * confirm this permission is intended for production builds.
+ */
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+/*
+ * Create the device class for the rmidev node, once.
+ * Returns 0 on success (or if the class already exists), -ENODEV on
+ * failure.
+ */
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		/*
+		 * Reset to NULL: the previous code left the ERR_PTR in the
+		 * global, so later "!= NULL" checks (here and in the error
+		 * paths) would mistake it for a valid class.
+		 */
+		rmidev_device_class = NULL;
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+/*
+ * Attention callback: forward matching interrupts to the registered
+ * user-space client as SIGIO.
+ */
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev || !rmidev->pid)
+		return;
+
+	if (rmidev->intr_mask & intr_mask)
+		send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+}
+
+/*
+ * Set up the rmidev expansion module for this controller: allocate the
+ * handle, create the device class, char device region, cdev and device
+ * node, export the attention GPIO, and create the sysfs interface.
+ * Returns 0 on success or a negative errno.
+ */
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Only one rmidev handle may exist */
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	/* Pre-build the siginfo payloads sent to the user-space client */
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	/* Reuse a previously obtained major number when available.
+	 * NOTE(review): the register_chrdev_region() result in this branch
+	 * is not checked before use — confirm. */
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	/* GPIO export failures are logged but not fatal */
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	/* NOTE(review): attr_count is unsigned char, so "attr_count >= 0"
+	 * is always true and this loop wraps past zero — fix to a signed
+	 * counter or a "while (attr_count--)" form. */
+	for (attr_count--; attr_count >= 0; attr_count--)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+err_char_device:
+	/* NOTE(review): rmidev_device_cleanup() already unregisters the
+	 * device-number region, and falling through to err_dev_data
+	 * unregisters it a second time — verify. */
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+/*
+ * Tear down everything created by rmidev_init_device() in reverse
+ * order, then signal completion to the module-exit path.
+ */
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	/* NOTE(review): rmidev_device_cleanup() above already unregisters
+	 * the region, and rmidev->dev_no is never assigned in the visible
+	 * init path (init uses a local dev_no) — verify this call. */
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	complete(&rmidev_remove_complete);
+
+	return;
+}
+
+/* Expansion-function registration record handed to the core driver */
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+/* Register this expansion function with the core driver */
+static int __init rmidev_module_init(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, true);
+
+	return 0;
+}
+
+/* Unregister and wait for rmidev_remove_device() to finish */
+static void __exit rmidev_module_exit(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, false);
+
+	wait_for_completion(&rmidev_remove_complete);
+
+	return;
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
new file mode 100644
index 0000000..244e97e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,1006 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+/* Default HID report IDs (overridden by the parsed report descriptor) */
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+/* HID report descriptor item prefixes recognized by the parser */
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+/* Vendor-defined usages mapped to report IDs */
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+#define FEATURE_REPORT_TYPE 0x03
+
+#define VENDOR_DEFINED_PAGE 0xff00
+
+#define BLOB_REPORT_SIZE 256
+
+/* HID command register opcodes */
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+/* Device report modes */
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
+/* Report IDs and blob size discovered from the report descriptor */
+struct hid_report_info {
+	unsigned char get_blob_id;
+	unsigned char write_id;
+	unsigned char read_addr_id;
+	unsigned char read_data_id;
+	unsigned char set_mode_id;
+	unsigned int blob_size;
+};
+
+static struct hid_report_info hid_report;
+
+/*
+ * In-memory copy of the device descriptor read from the controller
+ * (layout appears to follow the HID-over-I2C device descriptor —
+ * confirm against the spec).
+ */
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+/* Lazily grown scratch buffers shared by all bus transfers in this file */
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned int read_size;
+	unsigned int write_size;
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+/*
+ * Parse the device-tree node into the board data structure.
+ *
+ * @dev: owning device (used for devm allocation and error logging)
+ * @bdata: board data to fill in; optional properties get defaults
+ *
+ * Returns 0 on success or a negative errno when a present property
+ * cannot be read or an allocation fails.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,dev-dscrptr-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,dev-dscrptr-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,dev-dscrptr-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->device_descriptor_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->device_descriptor_addr = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/*
+	 * Boolean properties: presence alone sets the flag.  The previous
+	 * code used "prop > 0", an ordered comparison between a pointer
+	 * and the null pointer constant, which is undefined behavior;
+	 * test against NULL instead.
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* Each virtual button takes 5 u32 entries */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Issue one i2c_msg transfer, retrying up to SYN_I2C_RETRY_TIMES with
+ * a 20 ms delay after each failure.
+ * Returns 0 on success or -EIO when all attempts fail.
+ */
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char attempt = 0;
+
+	while (attempt < SYN_I2C_RETRY_TIMES) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			return 0;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, attempt + 1);
+		msleep(20);
+		attempt++;
+	}
+
+	dev_err(&client->dev,
+			"%s: I2C transfer over retry limit\n",
+			__func__);
+
+	return -EIO;
+}
+
+/*
+ * Ensure *buffer can hold at least length bytes, reallocating if needed.
+ *
+ * @buffer: buffer pointer to (re)allocate
+ * @buffer_size: current capacity; updated to match the buffer
+ * @length: minimum required capacity in bytes
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure.  On failure
+ * the recorded size is reset to zero: the previous code left the stale
+ * size behind, so a later call could assume capacity it no longer had
+ * and write through the NULL buffer.
+ */
+static int check_buffer(unsigned char **buffer, unsigned int *buffer_size,
+		unsigned int length)
+{
+	if (*buffer_size < length) {
+		if (*buffer_size)
+			kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer)) {
+			*buffer_size = 0;
+			return -ENOMEM;
+		}
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+/*
+ * Read length bytes from the device into the shared read buffer.
+ * Returns 0 on success or a negative errno.
+ */
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	/*
+	 * Check the allocation result; the previous code ignored it and
+	 * could hand a NULL buffer to the I2C core.
+	 */
+	retval = check_buffer(&buffer.read, &buffer.read_size, length);
+	if (retval < 0)
+		return retval;
+
+	msg[0].buf = buffer.read;
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * Write length bytes from the shared write buffer to the device.
+ * Returns 0 on success or a negative errno from do_i2c_transfer().
+ */
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	return do_i2c_transfer(client, msg);
+}
+
+/*
+ * Advance *index past one report descriptor item.  The low two bits of
+ * the item prefix select a data size of 0, 1, 2, or 4 bytes; adding 1
+ * accounts for the prefix byte itself.  The masked value can only be
+ * 0-3, so a table fully covers all cases.
+ */
+static void traverse_report_descriptor(unsigned int *index)
+{
+	static const unsigned char item_skip[] = {1, 2, 3, 5};
+	unsigned char *buf = buffer.read;
+
+	*index += item_skip[buf[*index] & MASK_2BIT];
+}
+
+/*
+ * Scan forward from the given descriptor offset for the first report
+ * count item and record its value as the blob report size.
+ */
+static void find_blob_size(unsigned int index)
+{
+	unsigned int pos = index;
+	unsigned char prefix;
+	unsigned char *buf = buffer.read;
+
+	while (pos < hid_dd.report_descriptor_length) {
+		prefix = buf[pos];
+		if (prefix == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = buf[pos + 1];
+			break;
+		}
+		if (prefix == PREFIX_REPORT_COUNT_2BYTES) {
+			hid_report.blob_size = buf[pos + 1] |
+					(buf[pos + 2] << 8);
+			break;
+		}
+		traverse_report_descriptor(&pos);
+	}
+}
+
+/*
+ * Inspect one report descriptor item and record any vendor-defined
+ * report IDs of interest.  The current report ID and usage page are
+ * kept in function-static variables because they are established by
+ * earlier items and apply to later ones; this makes the function
+ * stateful across calls (and not reentrant).
+ */
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	/* Only usages inside the vendor-defined page are meaningful here */
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Read the HID report descriptor from the device and walk it to
+ * discover the vendor report IDs and blob size.  Defaults are loaded
+ * first so any IDs the descriptor does not define keep sane values.
+ * Returns 0 on success or a negative errno from the bus transfers.
+ */
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	/* Removed the unused local "buf" the previous code assigned here */
+
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+/*
+ * Send a SET_REPORT feature request putting the device into RMI
+ * register-access mode.  Returns 0 on success or a negative errno.
+ */
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/*
+	 * Check the allocation result; the previous code ignored it and
+	 * would write through a NULL buffer on OOM.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 11);
+	if (retval < 0)
+		goto exit;
+
+	/* set rmi mode */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04;
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Query the current report mode via a GET_REPORT feature request.
+ * Returns the mode byte (e.g. RMI_MODE) on success or a negative errno.
+ */
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/*
+	 * Check the allocation result; the previous code ignored it and
+	 * would write through a NULL buffer on OOM.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 7);
+	if (retval < 0)
+		goto exit;
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	/* First read returns the 2-byte report length */
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	/* NOTE(review): assumes report_size >= 4 — confirm for this device */
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Bring up the HID/I2C interface: read the device descriptor, parse
+ * the report descriptor, power on and reset the device, drain the
+ * reset report and the blob, then switch into RMI mode.
+ * Returns 0 on success or a negative errno.
+ */
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/*
+	 * Check the allocation result; the previous code ignored it and
+	 * would write through a NULL buffer on OOM.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 6);
+	if (retval < 0)
+		goto exit;
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* Wait for the device to assert attention after reset.
+	 * NOTE(review): this busy-wait has no timeout and can hang the
+	 * caller if the line never drops — consider bounding it. */
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * Read length bytes from RMI register addr over the HID/I2C transport:
+ * send a read-address output report, then poll for the matching input
+ * report.  If the transfer fails and the device has fallen out of RMI
+ * mode, the interface is re-initialized once and the read retried.
+ * Returns the number of bytes read or a negative errno.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char recover = 1;
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = (unsigned short)(length + 4),
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/*
+	 * Check both allocation results; the previous code ignored them
+	 * and would write through a NULL buffer on OOM.
+	 */
+	retval = check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	if (retval < 0)
+		goto exit;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+
+	retval = check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	if (retval < 0)
+		goto exit;
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	/* Poll for the input report carrying the register data */
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* One-shot recovery: re-init HID/I2C if the device left RMI mode */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Write 'length' bytes of register data to RMI address 'addr' through
+ * the HID/I2C protocol using a single output report.  Returns the
+ * number of bytes written on success or a negative errno.  Mirrors the
+ * read path's recovery: if the transfer fails and the device has left
+ * RMI mode, the HID/I2C interface is re-initialized once and the write
+ * is retried.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char recover = 1;	/* one recovery pass allowed */
+	unsigned int msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	/* Pad short payloads up to the device's maximum output report
+	 * size (+2 for the output register index). */
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Output report layout: 16-bit output register index, 16-bit
+	 * report size, write report ID, reserved byte, 16-bit RMI address,
+	 * 16-bit payload length (low byte first), then the data itself. */
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = (unsigned short)msg_length;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* On failure, try once to restore HID/I2C mode and redo the write */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/* I2C bus operations handed to the DSX core driver via hw_if */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Hardware interface passed to the DSX core as platform data */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device allocated in probe() and bound to the DSX core */
+static struct platform_device *synaptics_dsx_i2c_device;
+
+/* Device release callback: frees the platform device kzalloc'd in probe */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+/*
+ * Probe: allocate the DSX platform device, gather board data from the
+ * device tree (or platform data when !CONFIG_OF), and register the
+ * platform device that the DSX core driver binds to.
+ *
+ * Fixes vs. original: synaptics_dsx_i2c_device was leaked on every
+ * error path after its allocation, and on platform_device_register()
+ * failure its reference was never dropped; parse_dt() errors were
+ * silently ignored.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* Registration initialized the device, so drop the
+		 * reference; the release callback frees the structure. */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	/* Not device_initialize()d yet, so a plain kfree() is correct;
+	 * devm allocations are released by the driver core. */
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * Remove: unregister the DSX platform device *before* freeing the I2C
+ * I/O buffers, so the core driver cannot issue bus transfers into
+ * freed memory during its teardown (the original freed first).
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	if (buffer.read_size)
+		kfree(buffer.read);
+
+	if (buffer.write_size)
+		kfree(buffer.write);
+
+	return 0;
+}
+
+/* I2C device IDs for non-devicetree platform matching */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device tree compatible strings handled by this driver */
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Exported hook that registers the I2C bus driver (invoked by the DSX
+ * core module — presumably at its init; confirm against caller). */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/* Exported hook that unregisters the I2C bus driver */
+void synaptics_rmi4_bus_exit(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
new file mode 100644
index 0000000..e2dafbb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
@@ -0,0 +1,712 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+static unsigned char *buf;
+
+static struct spi_transfer *xfer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate board data from the "synaptics,*" device tree
+ * properties of the SPI device node.
+ *
+ * Optional properties fall back to a default; a numeric sub-property
+ * that is required once its parent property exists (e.g.
+ * synaptics,power-on-state with synaptics,power-gpio) causes a
+ * negative errno return when unreadable.  Returns 0 on success.
+ *
+ * Fix vs. original: the boolean flags (swap-axes, x-flip, y-flip) were
+ * computed with "prop > 0", a relational comparison between a pointer
+ * and an integer, which is not valid C; a NULL test is the intent.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->byte_delay_us = value;
+		}
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->block_delay_us = value;
+		}
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,address-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,address-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,address-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->addr_delay_us = value;
+		}
+	} else {
+		bdata->addr_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/* Presence-only boolean properties */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		/* -1 is the "no under-bootloader address" sentinel;
+		 * assumes callers compare against it — TODO confirm */
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* each virtual button is described by 5 consecutive u32s */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure the shared data buffer 'buf' holds at least 'size' bytes and
+ * the spi_transfer array 'xfer' holds at least 'count' entries.  Both
+ * grow monotonically (current capacities are cached in function-static
+ * variables) and are only released in synaptics_rmi4_bus_exit().  An
+ * already-large-enough xfer array is zeroed before reuse.  Returns 0 on
+ * success or -ENOMEM.
+ */
+static int synaptics_rmi4_spi_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int size, unsigned int count)
+{
+	static unsigned int buf_size;
+	static unsigned int xfer_count;
+
+	if (size > buf_size) {
+		if (buf_size)
+			kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buf\n",
+					__func__);
+			buf_size = 0;	/* forces re-allocation next call */
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	if (count > xfer_count) {
+		if (xfer_count)
+			kfree(xfer);
+		xfer = kcalloc(count, sizeof(struct spi_transfer), GFP_KERNEL);
+		if (!xfer) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for xfer\n",
+					__func__);
+			xfer_count = 0;
+			return -ENOMEM;
+		}
+		xfer_count = count;
+	} else {
+		/* reused array: clear only the entries about to be used */
+		memset(xfer, 0, count * sizeof(struct spi_transfer));
+	}
+
+	return 0;
+}
+
+/*
+ * Select the RMI register page containing 'addr' by writing the page
+ * number to the page-select register.  Returns PAGE_SELECT_LEN on
+ * success (including when no write was needed because the page already
+ * matches) or a negative errno from the SPI transfer.
+ */
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = PAGE_SELECT_LEN + 1;
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* NOTE(review): only bit 7 of the page is compared, not the full
+	 * page value — presumably the register map only distinguishes
+	 * pages 0x00-0x7F from 0x80+; confirm against the firmware map. */
+	page = ((addr >> 8) & MASK_8BIT);
+	if ((page >> 7) == (rmi4_data->current_page >> 7))
+		return PAGE_SELECT_LEN;
+
+	spi_message_init(&msg);
+
+	retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+			byte_count);
+	if (retval < 0)
+		return retval;
+
+	/* write command, page-select register address (0xFF), new page */
+	buf[0] = SPI_WRITE;
+	buf[1] = MASK_8BIT;
+	buf[2] = page;
+
+	if (bdata->byte_delay_us == 0) {
+		/* single transfer for the whole command */
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		/* one transfer per byte with configured inter-byte delays */
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		rmi4_data->current_page = page;
+		retval = PAGE_SELECT_LEN;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	return retval;
+}
+
+/*
+ * Read 'length' bytes from RMI register 'addr' over SPI.  The 2-byte
+ * address (with the read bit set in the high byte) is clocked out
+ * first, then 'length' bytes are clocked into the shared 'buf'.  When
+ * synaptics,byte-delay-us is zero a single two-part transfer is used;
+ * otherwise one transfer per byte is built so per-byte delays can be
+ * inserted.  Returns bytes read or a negative errno.
+ */
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	unsigned char txbuf[ADDRESS_LEN];
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	/* address phase: high byte carries the read flag */
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	/* fast path needs 2 transfers; byte-delay path needs one per byte */
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				2);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = ADDRESS_LEN;
+		xfer[0].tx_buf = &txbuf[0];
+		spi_message_add_tail(&xfer[0], &msg);
+		xfer[1].len = length;
+		xfer[1].rx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[1].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			if (index < ADDRESS_LEN)
+				xfer[index].tx_buf = &txbuf[index];
+			else
+				xfer[index].rx_buf = &buf[index - ADDRESS_LEN];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = secure_memcpy(data, length, buf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write 'length' bytes of register data to RMI address 'addr' over
+ * SPI.  The 2-byte address (read bit cleared) and the payload are
+ * staged in the shared 'buf' and clocked out as a single transfer, or
+ * as one transfer per byte when synaptics,byte-delay-us is non-zero.
+ * Returns bytes written or a negative errno.
+ */
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	/* fast path needs 1 transfer; byte-delay path needs one per byte */
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				1);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	/* address phase: high byte with the read flag cleared */
+	buf[0] = (addr >> 8) & ~SPI_READ;
+	buf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&buf[ADDRESS_LEN],
+			byte_count - ADDRESS_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(xfer, &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* SPI bus operations handed to the DSX core driver via hw_if */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+/* Hardware interface passed to the DSX core as platform data */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device allocated in probe() and bound to the DSX core */
+static struct platform_device *synaptics_dsx_spi_device;
+
+/* Device release callback: frees the platform device kzalloc'd in probe */
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+
+	return;
+}
+
+/*
+ * Probe: allocate the DSX platform device, gather board data from the
+ * device tree (or platform data when !CONFIG_OF), configure the SPI
+ * link (8-bit words, mode 3), and register the platform device that
+ * the DSX core driver binds to.
+ *
+ * Fixes vs. original: synaptics_dsx_spi_device was leaked on every
+ * error path after its allocation (board-data OOM, spi_setup failure),
+ * the reference was never dropped on platform_device_register()
+ * failure, and parse_dt() errors were silently ignored.
+ */
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		retval = parse_dt(&spi->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		goto err_free_device;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* Registration initialized the device, so drop the
+		 * reference; the release callback frees the structure. */
+		platform_device_put(synaptics_dsx_spi_device);
+		synaptics_dsx_spi_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	/* Not device_initialize()d yet, so a plain kfree() is correct;
+	 * devm allocations are released by the driver core. */
+	kfree(synaptics_dsx_spi_device);
+	synaptics_dsx_spi_device = NULL;
+	return retval;
+}
+
+/* Remove: unbind the DSX core; the shared 'buf'/'xfer' allocations are
+ * released later in synaptics_rmi4_bus_exit(). */
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+/* SPI device IDs for non-devicetree platform matching */
+static const struct spi_device_id synaptics_rmi4_id_table[] = {
+	{SPI_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(spi, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device tree compatible strings handled by this driver */
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+
+/* Exported hook that registers the SPI bus driver (invoked by the DSX
+ * core module — presumably at its init; confirm against caller). */
+int synaptics_rmi4_bus_init(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/* Exported hook that frees the shared transfer buffers and
+ * unregisters the SPI bus driver */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(buf);
+
+	kfree(xfer);
+
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
new file mode 100644
index 0000000..606e737
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -0,0 +1,5356 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "f54"
+
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_70_73_SIZE 6
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_150_SIZE 1
+#define CONTROL_151_SIZE 1
+#define CONTROL_152_SIZE 1
+#define CONTROL_153_SIZE 1
+#define CONTROL_154_SIZE 1
+#define CONTROL_155_SIZE 1
+#define CONTROL_156_SIZE 1
+#define CONTROL_157_158_SIZE 2
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_166_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_168_SIZE 1
+#define CONTROL_169_SIZE 1
+#define CONTROL_171_SIZE 1
+#define CONTROL_172_SIZE 1
+#define CONTROL_173_SIZE 1
+#define CONTROL_174_SIZE 1
+#define CONTROL_175_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_177_178_SIZE 2
+#define CONTROL_179_SIZE 1
+#define CONTROL_182_SIZE 1
+#define CONTROL_183_SIZE 1
+#define CONTROL_185_SIZE 1
+#define CONTROL_186_SIZE 1
+#define CONTROL_187_SIZE 1
+#define CONTROL_188_SIZE 1
+
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+/* Token-pasting helper used to build the test_sysfs_* handler names. */
+#define concat(a, b) a##b
+
+/* Shorthand for the address of a device_attribute's embedded attr. */
+#define attrify(propname) (&dev_attr_##propname.attr)
+
+/*
+ * Declare the sysfs show handler for "propname" and define a
+ * read-only (0444) device attribute wired to it; write attempts are
+ * routed to the common synaptics_rmi4_store_error stub.
+ */
+#define show_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0444,\
+		concat(test_sysfs, _##propname##_show),\
+		synaptics_rmi4_store_error);
+
+/*
+ * Declare the sysfs store handler for "propname" and define a
+ * write-only (0220) device attribute wired to it; read attempts are
+ * routed to the common synaptics_rmi4_show_error stub.
+ */
+#define store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0220,\
+		synaptics_rmi4_show_error,\
+		concat(test_sysfs, _##propname##_store));
+
+/*
+ * Declare both show and store handlers for "propname" and define a
+ * read/write (0664) device attribute wired to them.
+ */
+#define show_store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0664,\
+		concat(test_sysfs, _##propname##_show),\
+		concat(test_sysfs, _##propname##_store));
+
+/*
+ * disable_cbc - clear the CBC transmitter carrier selection bit of the
+ * given F54 control register (read-modify-write of
+ * f54->control.<ctrl_num>).
+ *
+ * NOTE: the expansion contains "return retval;" on failure, so this
+ * macro may only be used inside functions returning int, with local
+ * variables "retval" and "rmi4_data" in scope.
+ */
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
+/*
+ * F54 diagnostic report types. Each value is the report number
+ * requested through the F54 data registers; gaps in the numbering
+ * correspond to report types this driver does not handle.
+ */
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	F54_FULL_RAW_CAP_TDDI = 92,
+	INVALID_REPORT_TYPE = -1,
+};
+
+/*
+ * AFE calibration selector — presumably distinguishes the two F54
+ * ctrl 188 start bits (start_calibration vs start_is_calibration);
+ * NOTE(review): confirm against the call sites using this enum.
+ */
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
+/*
+ * F54 query registers 0-12 (fixed portion of the query space). The
+ * named bitfield view and the raw 14-byte register image share
+ * storage through the anonymous union, so the struct can be filled by
+ * a single bulk read into "data". Fields named f54_queryN_bM(__K) are
+ * reserved bits kept only to preserve the register layout.
+ */
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+struct f54_query_13 {
+ union {
+ struct {
+ unsigned char has_ctrl86:1;
+ unsigned char has_ctrl87:1;
+ unsigned char has_ctrl87_sub0:1;
+ unsigned char has_ctrl87_sub1:1;
+ unsigned char has_ctrl87_sub2:1;
+ unsigned char has_cidim:1;
+ unsigned char has_noise_mitigation_enhancement:1;
+ unsigned char has_rail_im:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_15 {
+ union {
+ struct {
+ unsigned char has_ctrl90:1;
+ unsigned char has_transmit_strength:1;
+ unsigned char has_ctrl87_sub3:1;
+ unsigned char has_query16:1;
+ unsigned char has_query20:1;
+ unsigned char has_query21:1;
+ unsigned char has_query22:1;
+ unsigned char has_query25:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_16 {
+ union {
+ struct {
+ unsigned char has_query17:1;
+ unsigned char has_data17:1;
+ unsigned char has_ctrl92:1;
+ unsigned char has_ctrl93:1;
+ unsigned char has_ctrl94_query18:1;
+ unsigned char has_ctrl95_query19:1;
+ unsigned char has_ctrl99:1;
+ unsigned char has_ctrl100:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_21 {
+ union {
+ struct {
+ unsigned char has_abs_rx:1;
+ unsigned char has_abs_tx:1;
+ unsigned char has_ctrl91:1;
+ unsigned char has_ctrl96:1;
+ unsigned char has_ctrl97:1;
+ unsigned char has_ctrl98:1;
+ unsigned char has_data19:1;
+ unsigned char has_query24_data18:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_22 {
+ union {
+ struct {
+ unsigned char has_packed_image:1;
+ unsigned char has_ctrl101:1;
+ unsigned char has_dynamic_sense_display_ratio:1;
+ unsigned char has_query23:1;
+ unsigned char has_ctrl103_query26:1;
+ unsigned char has_ctrl104:1;
+ unsigned char has_ctrl105:1;
+ unsigned char has_query28:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_23 {
+ union {
+ struct {
+ unsigned char has_ctrl102:1;
+ unsigned char has_ctrl102_sub1:1;
+ unsigned char has_ctrl102_sub2:1;
+ unsigned char has_ctrl102_sub4:1;
+ unsigned char has_ctrl102_sub5:1;
+ unsigned char has_ctrl102_sub9:1;
+ unsigned char has_ctrl102_sub10:1;
+ unsigned char has_ctrl102_sub11:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_25 {
+ union {
+ struct {
+ unsigned char has_ctrl106:1;
+ unsigned char has_ctrl102_sub12:1;
+ unsigned char has_ctrl107:1;
+ unsigned char has_ctrl108:1;
+ unsigned char has_ctrl109:1;
+ unsigned char has_data20:1;
+ unsigned char f54_query25_b6:1;
+ unsigned char has_query27:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_27 {
+ union {
+ struct {
+ unsigned char has_ctrl110:1;
+ unsigned char has_data21:1;
+ unsigned char has_ctrl111:1;
+ unsigned char has_ctrl112:1;
+ unsigned char has_ctrl113:1;
+ unsigned char has_data22:1;
+ unsigned char has_ctrl114:1;
+ unsigned char has_query29:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_29 {
+ union {
+ struct {
+ unsigned char has_ctrl115:1;
+ unsigned char has_ground_ring_options:1;
+ unsigned char has_lost_bursts_tuning:1;
+ unsigned char has_aux_exvcom2_select:1;
+ unsigned char has_ctrl116:1;
+ unsigned char has_data23:1;
+ unsigned char has_ctrl117:1;
+ unsigned char has_query30:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_30 {
+ union {
+ struct {
+ unsigned char has_ctrl118:1;
+ unsigned char has_ctrl119:1;
+ unsigned char has_ctrl120:1;
+ unsigned char has_ctrl121:1;
+ unsigned char has_ctrl122_query31:1;
+ unsigned char has_ctrl123:1;
+ unsigned char has_ctrl124:1;
+ unsigned char has_query32:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_32 {
+ union {
+ struct {
+ unsigned char has_ctrl125:1;
+ unsigned char has_ctrl126:1;
+ unsigned char has_ctrl127:1;
+ unsigned char has_abs_charge_pump_disable:1;
+ unsigned char has_query33:1;
+ unsigned char has_data24:1;
+ unsigned char has_query34:1;
+ unsigned char has_query35:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_33 {
+ union {
+ struct {
+ unsigned char has_ctrl128:1;
+ unsigned char has_ctrl129:1;
+ unsigned char has_ctrl130:1;
+ unsigned char has_ctrl131:1;
+ unsigned char has_ctrl132:1;
+ unsigned char has_ctrl133:1;
+ unsigned char has_ctrl134:1;
+ unsigned char has_query36:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_35 {
+ union {
+ struct {
+ unsigned char has_data25:1;
+ unsigned char has_ctrl135:1;
+ unsigned char has_ctrl136:1;
+ unsigned char has_ctrl137:1;
+ unsigned char has_ctrl138:1;
+ unsigned char has_ctrl139:1;
+ unsigned char has_data26:1;
+ unsigned char has_ctrl140:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_36 {
+ union {
+ struct {
+ unsigned char has_ctrl141:1;
+ unsigned char has_ctrl142:1;
+ unsigned char has_query37:1;
+ unsigned char has_ctrl143:1;
+ unsigned char has_ctrl144:1;
+ unsigned char has_ctrl145:1;
+ unsigned char has_ctrl146:1;
+ unsigned char has_query38:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_38 {
+ union {
+ struct {
+ unsigned char has_ctrl147:1;
+ unsigned char has_ctrl148:1;
+ unsigned char has_ctrl149:1;
+ unsigned char has_ctrl150:1;
+ unsigned char has_ctrl151:1;
+ unsigned char has_ctrl152:1;
+ unsigned char has_ctrl153:1;
+ unsigned char has_query39:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_39 {
+ union {
+ struct {
+ unsigned char has_ctrl154:1;
+ unsigned char has_ctrl155:1;
+ unsigned char has_ctrl156:1;
+ unsigned char has_ctrl160:1;
+ unsigned char has_ctrl157_ctrl158:1;
+ unsigned char f54_query39_b5__6:2;
+ unsigned char has_query40:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_40 {
+ union {
+ struct {
+ unsigned char has_ctrl169:1;
+ unsigned char has_ctrl163_query41:1;
+ unsigned char f54_query40_b2:1;
+ unsigned char has_ctrl165_query42:1;
+ unsigned char has_ctrl166:1;
+ unsigned char has_ctrl167:1;
+ unsigned char has_ctrl168:1;
+ unsigned char has_query43:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_43 {
+ union {
+ struct {
+ unsigned char f54_query43_b0__1:2;
+ unsigned char has_ctrl171:1;
+ unsigned char has_ctrl172_query44_query45:1;
+ unsigned char has_ctrl173:1;
+ unsigned char has_ctrl174:1;
+ unsigned char has_ctrl175:1;
+ unsigned char has_query46:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_46 {
+ union {
+ struct {
+ unsigned char has_ctrl176:1;
+ unsigned char has_ctrl177_ctrl178:1;
+ unsigned char has_ctrl179:1;
+ unsigned char f54_query46_b3:1;
+ unsigned char has_data27:1;
+ unsigned char has_data28:1;
+ unsigned char f54_query46_b6:1;
+ unsigned char has_query47:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_47 {
+ union {
+ struct {
+ unsigned char f54_query47_b0:1;
+ unsigned char has_ctrl182:1;
+ unsigned char has_ctrl183:1;
+ unsigned char f54_query47_b3:1;
+ unsigned char has_ctrl185:1;
+ unsigned char has_ctrl186:1;
+ unsigned char has_ctrl187:1;
+ unsigned char has_query49:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_49 {
+ union {
+ struct {
+ unsigned char f54_query49_b0__1:2;
+ unsigned char has_ctrl188:1;
+ unsigned char has_data31:1;
+ unsigned char f54_query49_b4__6:3;
+ unsigned char has_query50:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_50 {
+ union {
+ struct {
+ unsigned char f54_query50_b0__6:7;
+ unsigned char has_query51:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_51 {
+ union {
+ struct {
+ unsigned char f54_query51_b0__4:5;
+ unsigned char has_query53_query54_ctrl198:1;
+ unsigned char has_ctrl199:1;
+ unsigned char has_query55:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_55 {
+ union {
+ struct {
+ unsigned char has_query56:1;
+ unsigned char has_data33_data34:1;
+ unsigned char has_alt_report_rate:1;
+ unsigned char has_ctrl200:1;
+ unsigned char has_ctrl201_ctrl202:1;
+ unsigned char has_ctrl203:1;
+ unsigned char has_ctrl204:1;
+ unsigned char has_query57:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_57 {
+ union {
+ struct {
+ unsigned char has_ctrl205:1;
+ unsigned char has_ctrl206:1;
+ unsigned char has_usb_bulk_read:1;
+ unsigned char has_ctrl207:1;
+ unsigned char has_ctrl208:1;
+ unsigned char has_ctrl209:1;
+ unsigned char has_ctrl210:1;
+ unsigned char has_query58:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_58 {
+ union {
+ struct {
+ unsigned char has_query59:1;
+ unsigned char has_query60:1;
+ unsigned char has_ctrl211:1;
+ unsigned char has_ctrl212:1;
+ unsigned char has_hybrid_abs_tx_axis_filtering:1;
+ unsigned char has_hybrid_abs_tx_interpolation:1;
+ unsigned char has_ctrl213:1;
+ unsigned char has_query61:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_61 {
+ union {
+ struct {
+ unsigned char has_ctrl214:1;
+ unsigned char has_ctrl215_query62_query63:1;
+ unsigned char f54_query_61_b2:1;
+ unsigned char has_ctrl216:1;
+ unsigned char has_ctrl217:1;
+ unsigned char has_misc_host_ctrl:1;
+ unsigned char hybrid_abs_buttons:1;
+ unsigned char has_query64:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_64 {
+ union {
+ struct {
+ unsigned char has_ctrl101_sub1:1;
+ unsigned char has_ctrl220:1;
+ unsigned char has_ctrl221:1;
+ unsigned char has_ctrl222:1;
+ unsigned char has_ctrl219_sub1:1;
+ unsigned char has_ctrl103_sub3:1;
+ unsigned char has_ctrl224_ctrl226_ctrl227:1;
+ unsigned char has_query65:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_65 {
+ union {
+ struct {
+ unsigned char f54_query_65_b0__1:2;
+ unsigned char has_ctrl101_sub2:1;
+ unsigned char f54_query_65_b3__4:2;
+ unsigned char has_query66_ctrl231:1;
+ unsigned char has_ctrl232:1;
+ unsigned char has_query67:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_67 {
+ union {
+ struct {
+ unsigned char has_abs_doze_spatial_filter_en:1;
+ unsigned char has_abs_doze_avg_filter_enhancement_en:1;
+ unsigned char has_single_display_pulse:1;
+ unsigned char f54_query_67_b3__4:2;
+ unsigned char has_ctrl235_ctrl236:1;
+ unsigned char f54_query_67_b6:1;
+ unsigned char has_query68:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_68 {
+ union {
+ struct {
+ unsigned char f54_query_68_b0:1;
+ unsigned char has_ctrl238:1;
+ unsigned char has_ctrl238_sub1:1;
+ unsigned char has_ctrl238_sub2:1;
+ unsigned char has_ctrl239:1;
+ unsigned char has_freq_filter_bw_ext:1;
+ unsigned char is_tddi_hic:1;
+ unsigned char has_query69:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f54_query_69 {
+ union {
+ struct {
+ unsigned char has_ctrl240_sub0:1;
+ unsigned char has_ctrl240_sub1_sub2:1;
+ unsigned char has_ctrl240_sub3:1;
+ unsigned char has_ctrl240_sub4:1;
+ unsigned char f54_query_69_b4__7:4;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+/*
+ * F54 data register 31 (calibration status). "data" aliases the
+ * bitfield view of the single register byte; "address" is driver
+ * bookkeeping (the register's discovered location) stored after the
+ * image, not part of the register itself. NOTE(review): the bitfields
+ * name only 7 of the 8 bits; the top bit is unnamed padding.
+ */
+struct f54_data_31 {
+	union {
+		struct {
+			unsigned char is_calibration_crc:1;
+			unsigned char calibration_crc:1;
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_7 {
+ union {
+ struct {
+ unsigned char cbc_cap:3;
+ unsigned char cbc_polarity:1;
+ unsigned char cbc_tx_carrier_selection:1;
+ unsigned char f54_ctrl7_b5__7:3;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_41 {
+ union {
+ struct {
+ unsigned char no_signal_clarity:1;
+ unsigned char f54_ctrl41_b1__7:7;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_57 {
+ union {
+ struct {
+ unsigned char cbc_cap:3;
+ unsigned char cbc_polarity:1;
+ unsigned char cbc_tx_carrier_selection:1;
+ unsigned char f54_ctrl57_b5__7:3;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_86 {
+ union {
+ struct {
+ unsigned char enable_high_noise_state:1;
+ unsigned char dynamic_sense_display_ratio:2;
+ unsigned char f54_ctrl86_b3__7:5;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_88 {
+ union {
+ struct {
+ unsigned char tx_low_reference_polarity:1;
+ unsigned char tx_high_reference_polarity:1;
+ unsigned char abs_low_reference_polarity:1;
+ unsigned char abs_polarity:1;
+ unsigned char cbc_polarity:1;
+ unsigned char cbc_tx_carrier_selection:1;
+ unsigned char charge_pump_enable:1;
+ unsigned char cbc_abs_auto_servo:1;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_110 {
+ union {
+ struct {
+ unsigned char active_stylus_rx_feedback_cap;
+ unsigned char active_stylus_rx_feedback_cap_reference;
+ unsigned char active_stylus_low_reference;
+ unsigned char active_stylus_high_reference;
+ unsigned char active_stylus_gain_control;
+ unsigned char active_stylus_gain_control_reference;
+ unsigned char active_stylus_timing_mode;
+ unsigned char active_stylus_discovery_bursts;
+ unsigned char active_stylus_detection_bursts;
+ unsigned char active_stylus_discovery_noise_multiplier;
+ unsigned char active_stylus_detection_envelope_min;
+ unsigned char active_stylus_detection_envelope_max;
+ unsigned char active_stylus_lose_count;
+ } __packed;
+ struct {
+ unsigned char data[13];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_149 {
+ union {
+ struct {
+ unsigned char trans_cbc_global_cap_enable:1;
+ unsigned char f54_ctrl149_b1__7:7;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+struct f54_control_188 {
+ union {
+ struct {
+ unsigned char start_calibration:1;
+ unsigned char start_is_calibration:1;
+ unsigned char frequency:2;
+ unsigned char start_production_test:1;
+ unsigned char short_test_calibration:1;
+ unsigned char f54_ctrl188_b7:1;
+ } __packed;
+ struct {
+ unsigned char data[1];
+ unsigned short address;
+ } __packed;
+ };
+};
+
+/*
+ * Pointers to the F54 control registers this driver reads or
+ * modifies. Presumably allocated and assigned addresses during
+ * control-register map parsing — confirm in the setup code.
+ */
+struct f54_control {
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+/*
+ * Per-device state for the F54 test-reporting function: discovered
+ * register base addresses, parsed query/control register images, the
+ * report buffer and its bookkeeping, the sysfs directory, and the
+ * watchdog timer plus workqueue used while fetching reports.
+ */
+struct synaptics_rmi4_f54_handle {
+	bool no_auto_cal;
+	bool skip_preparation;
+	unsigned char status;
+	unsigned char intr_mask;
+	unsigned char intr_reg_num;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char *report_data;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size;
+	unsigned int data_buffer_size;
+	unsigned int data_pos;
+	enum f54_report_types report_type;
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_query_55 query_55;
+	struct f54_query_57 query_57;
+	struct f54_query_58 query_58;
+	struct f54_query_61 query_61;
+	struct f54_query_64 query_64;
+	struct f54_query_65 query_65;
+	struct f54_query_67 query_67;
+	struct f54_query_68 query_68;
+	struct f54_query_69 query_69;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	struct mutex status_mutex;
+	struct kobject *sysfs_dir;
+	struct hrtimer watchdog;
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+struct f55_query {
+ union {
+ struct {
+ /* query 0 */
+ unsigned char num_of_rx_electrodes;
+
+ /* query 1 */
+ unsigned char num_of_tx_electrodes;
+
+ /* query 2 */
+ unsigned char has_sensor_assignment:1;
+ unsigned char has_edge_compensation:1;
+ unsigned char curve_compensation_mode:2;
+ unsigned char has_ctrl6:1;
+ unsigned char has_alternate_transmitter_assignment:1;
+ unsigned char has_single_layer_multi_touch:1;
+ unsigned char has_query5:1;
+ } __packed;
+ unsigned char data[3];
+ };
+};
+
+struct f55_query_3 {
+ union {
+ struct {
+ unsigned char has_ctrl8:1;
+ unsigned char has_ctrl9:1;
+ unsigned char has_oncell_pattern_support:1;
+ unsigned char has_data0:1;
+ unsigned char has_single_wide_pattern_support:1;
+ unsigned char has_mirrored_tx_pattern_support:1;
+ unsigned char has_discrete_pattern_support:1;
+ unsigned char has_query9:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_5 {
+ union {
+ struct {
+ unsigned char has_corner_compensation:1;
+ unsigned char has_ctrl12:1;
+ unsigned char has_trx_configuration:1;
+ unsigned char has_ctrl13:1;
+ unsigned char f55_query5_b4:1;
+ unsigned char has_ctrl14:1;
+ unsigned char has_basis_function:1;
+ unsigned char has_query17:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_17 {
+ union {
+ struct {
+ unsigned char f55_query17_b0:1;
+ unsigned char has_ctrl16:1;
+ unsigned char has_ctrl18_ctrl19:1;
+ unsigned char has_ctrl17:1;
+ unsigned char has_ctrl20:1;
+ unsigned char has_ctrl21:1;
+ unsigned char has_ctrl22:1;
+ unsigned char has_query18:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_18 {
+ union {
+ struct {
+ unsigned char has_ctrl23:1;
+ unsigned char has_ctrl24:1;
+ unsigned char has_query19:1;
+ unsigned char has_ctrl25:1;
+ unsigned char has_ctrl26:1;
+ unsigned char has_ctrl27_query20:1;
+ unsigned char has_ctrl28_query21:1;
+ unsigned char has_query22:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_22 {
+ union {
+ struct {
+ unsigned char has_ctrl29:1;
+ unsigned char has_query23:1;
+ unsigned char has_guard_disable:1;
+ unsigned char has_ctrl30:1;
+ unsigned char has_ctrl31:1;
+ unsigned char has_ctrl32:1;
+ unsigned char has_query24_through_query27:1;
+ unsigned char has_query28:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_23 {
+ union {
+ struct {
+ unsigned char amp_sensor_enabled:1;
+ unsigned char image_transposed:1;
+ unsigned char first_column_at_left_side:1;
+ unsigned char size_of_column2mux:5;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_28 {
+ union {
+ struct {
+ unsigned char f55_query28_b0__4:5;
+ unsigned char has_ctrl37:1;
+ unsigned char has_query29:1;
+ unsigned char has_query30:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_30 {
+ union {
+ struct {
+ unsigned char has_ctrl38:1;
+ unsigned char has_query31_query32:1;
+ unsigned char has_ctrl39:1;
+ unsigned char has_ctrl40:1;
+ unsigned char has_ctrl41:1;
+ unsigned char has_ctrl42:1;
+ unsigned char has_ctrl43_ctrl44:1;
+ unsigned char has_query33:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f55_query_33 {
+ union {
+ struct {
+ unsigned char has_extended_amp_pad:1;
+ unsigned char has_extended_amp_btn:1;
+ unsigned char has_ctrl45_ctrl46:1;
+ unsigned char f55_query33_b3:1;
+ unsigned char has_ctrl47_sub0_sub1:1;
+ unsigned char f55_query33_b5__7:3;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+/*
+ * F55 control register 43 (two bytes). Byte 0: sensor-side swap flag
+ * plus reserved bits; byte 1: left/right AFE mux sizes (4 bits each).
+ */
+struct f55_control_43 {
+	union {
+		struct {
+			unsigned char swap_sensor_side:1;
+			unsigned char f55_ctrl43_b1__7:7;
+			unsigned char afe_l_mux_size:4;
+			unsigned char afe_r_mux_size:4;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Per-device state for the F55 sensor-configuration function:
+ * discovered register base addresses, parsed query register images,
+ * and the TX/RX (and force-sensing TRX) electrode assignment arrays.
+ */
+struct synaptics_rmi4_f55_handle {
+	bool amp_sensor;
+	bool extended_amp;
+	bool has_force;
+	unsigned char size_of_column2mux;
+	unsigned char afe_mux_offset;
+	unsigned char force_tx_offset;
+	unsigned char force_rx_offset;
+	unsigned char *tx_assignment;
+	unsigned char *rx_assignment;
+	unsigned char *force_tx_assignment;
+	unsigned char *force_rx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+	struct f55_query_28 query_28;
+	struct f55_query_30 query_30;
+	struct f55_query_33 query_33;
+};
+
/*
 * F21 Query 2 (three bytes): size of query 3 followed by two bytes of
 * query-presence flags.
 */
struct f21_query_2 {
	union {
		struct {
			unsigned char size_of_query3;
			struct {
				unsigned char query0_is_present:1;
				unsigned char query1_is_present:1;
				unsigned char query2_is_present:1;
				unsigned char query3_is_present:1;
				unsigned char query4_is_present:1;
				unsigned char query5_is_present:1;
				unsigned char query6_is_present:1;
				unsigned char query7_is_present:1;
			} __packed;
			struct {
				unsigned char query8_is_present:1;
				unsigned char query9_is_present:1;
				unsigned char query10_is_present:1;
				unsigned char query11_is_present:1;
				unsigned char query12_is_present:1;
				unsigned char query13_is_present:1;
				unsigned char query14_is_present:1;
				unsigned char query15_is_present:1;
			} __packed;
		};
		unsigned char data[3];
	};
};
+
/*
 * F21 Query 5 (four bytes): size of query 6 followed by three bytes of
 * control-register presence flags (controls 0-23).
 */
struct f21_query_5 {
	union {
		struct {
			unsigned char size_of_query6;
			struct {
				unsigned char ctrl0_is_present:1;
				unsigned char ctrl1_is_present:1;
				unsigned char ctrl2_is_present:1;
				unsigned char ctrl3_is_present:1;
				unsigned char ctrl4_is_present:1;
				unsigned char ctrl5_is_present:1;
				unsigned char ctrl6_is_present:1;
				unsigned char ctrl7_is_present:1;
			} __packed;
			struct {
				unsigned char ctrl8_is_present:1;
				unsigned char ctrl9_is_present:1;
				unsigned char ctrl10_is_present:1;
				unsigned char ctrl11_is_present:1;
				unsigned char ctrl12_is_present:1;
				unsigned char ctrl13_is_present:1;
				unsigned char ctrl14_is_present:1;
				unsigned char ctrl15_is_present:1;
			} __packed;
			struct {
				unsigned char ctrl16_is_present:1;
				unsigned char ctrl17_is_present:1;
				unsigned char ctrl18_is_present:1;
				unsigned char ctrl19_is_present:1;
				unsigned char ctrl20_is_present:1;
				unsigned char ctrl21_is_present:1;
				unsigned char ctrl22_is_present:1;
				unsigned char ctrl23_is_present:1;
			} __packed;
		};
		unsigned char data[4];
	};
};
+
/*
 * F21 Query 11 (six bytes): force-sensing capabilities, including the
 * maximum number of force sensors and force TX/RX electrodes.
 */
struct f21_query_11 {
	union {
		struct {
			unsigned char has_high_resolution_force:1;
			unsigned char has_force_sensing_txrx_mapping:1;
			unsigned char f21_query11_00_b2__7:6;
			unsigned char f21_query11_00_reserved;
			unsigned char max_number_of_force_sensors;
			unsigned char max_number_of_force_txs;
			unsigned char max_number_of_force_rxs;
			unsigned char f21_query11_01_reserved;
		} __packed;
		unsigned char data[6];
	};
};
+
/*
 * Runtime state for the F21 (force sensing) function: register base
 * addresses, electrode counts, and the combined force TX/RX assignment
 * table (TX entries first, then RX entries).
 */
struct synaptics_rmi4_f21_handle {
	bool has_force;
	unsigned char tx_assigned;
	unsigned char rx_assigned;
	unsigned char max_num_of_tx;
	unsigned char max_num_of_rx;
	unsigned char max_num_of_txrx;
	/* TX entries at [0, max_num_of_tx), RX entries after them. */
	unsigned char *force_txrx_assignment;
	unsigned short query_base_addr;
	unsigned short control_base_addr;
	unsigned short data_base_addr;
	unsigned short command_base_addr;
};
+
/*
 * sysfs attribute declarations. show_prototype()/store_prototype()/
 * show_store_prototype() expand to the test_sysfs_<name>_show/_store
 * handler prototypes and the matching device_attribute definitions.
 */
show_prototype(num_of_mapped_tx)
show_prototype(num_of_mapped_rx)
show_prototype(tx_mapping)
show_prototype(rx_mapping)
show_prototype(num_of_mapped_force_tx)
show_prototype(num_of_mapped_force_rx)
show_prototype(force_tx_mapping)
show_prototype(force_rx_mapping)
show_prototype(report_size)
show_prototype(status)
store_prototype(do_preparation)
store_prototype(force_cal)
store_prototype(get_report)
store_prototype(resume_touch)
store_prototype(do_afe_calibration)
show_store_prototype(report_type)
show_store_prototype(fifoindex)
show_store_prototype(no_auto_cal)
show_store_prototype(read_report)
+
/* All F54 test sysfs attributes, registered as a single group. */
static struct attribute *attrs[] = {
	attrify(num_of_mapped_tx),
	attrify(num_of_mapped_rx),
	attrify(tx_mapping),
	attrify(rx_mapping),
	attrify(num_of_mapped_force_tx),
	attrify(num_of_mapped_force_rx),
	attrify(force_tx_mapping),
	attrify(force_rx_mapping),
	attrify(report_size),
	attrify(status),
	attrify(do_preparation),
	attrify(force_cal),
	attrify(get_report),
	attrify(resume_touch),
	attrify(do_afe_calibration),
	attrify(report_type),
	attrify(fifoindex),
	attrify(no_auto_cal),
	attrify(read_report),
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};
+
/* Captured report data is exposed through a binary sysfs file. */
static ssize_t test_sysfs_data_read(struct file *data_file,
		struct kobject *kobj, struct bin_attribute *attributes,
		char *buf, loff_t pos, size_t count);

static struct bin_attribute test_report_data = {
	.attr = {
		.name = "report_data",
		.mode = 0444,	/* read-only for user space */
	},
	.size = 0,	/* updated when a report is captured */
	.read = test_sysfs_data_read,
};
+
/*
 * Module-wide handles for the F54/F55/F21 functions. f55 and f21 remain
 * NULL when the sensor does not expose the corresponding function.
 */
static struct synaptics_rmi4_f54_handle *f54;
static struct synaptics_rmi4_f55_handle *f55;
static struct synaptics_rmi4_f21_handle *f21;

/* Signalled when module teardown has finished. */
DECLARE_COMPLETION(test_remove_complete);
+
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+ switch (report_type) {
+ case F54_8BIT_IMAGE:
+ case F54_16BIT_IMAGE:
+ case F54_RAW_16BIT_IMAGE:
+ case F54_HIGH_RESISTANCE:
+ case F54_TX_TO_TX_SHORTS:
+ case F54_RX_TO_RX_SHORTS_1:
+ case F54_TRUE_BASELINE:
+ case F54_FULL_RAW_CAP_MIN_MAX:
+ case F54_RX_OPENS_1:
+ case F54_TX_OPENS:
+ case F54_TX_TO_GND_SHORTS:
+ case F54_RX_TO_RX_SHORTS_2:
+ case F54_RX_OPENS_2:
+ case F54_FULL_RAW_CAP:
+ case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+ case F54_SENSOR_SPEED:
+ case F54_ADC_RANGE:
+ case F54_TRX_OPENS:
+ case F54_TRX_TO_GND_SHORTS:
+ case F54_TRX_SHORTS:
+ case F54_ABS_RAW_CAP:
+ case F54_ABS_DELTA_CAP:
+ case F54_ABS_HYBRID_DELTA_CAP:
+ case F54_ABS_HYBRID_RAW_CAP:
+ case F54_AMP_FULL_RAW_CAP:
+ case F54_AMP_RAW_ADC:
+ case F54_FULL_RAW_CAP_TDDI:
+ return true;
+ break;
+ default:
+ f54->report_type = INVALID_REPORT_TYPE;
+ f54->report_size = 0;
+ return false;
+ }
+}
+
/*
 * test_set_report_size - compute f54->report_size for the currently
 * selected report type
 *
 * Sizes are derived from the assigned 2D TX/RX electrode counts;
 * fixed-size reports use the corresponding *_DATA_SIZE constants.
 * Unknown report types yield a size of 0.
 */
static void test_set_report_size(void)
{
	int retval;
	unsigned char tx = f54->tx_assigned;
	unsigned char rx = f54->rx_assigned;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	switch (f54->report_type) {
	case F54_8BIT_IMAGE:
		/* One byte per sensing node. */
		f54->report_size = tx * rx;
		break;
	case F54_16BIT_IMAGE:
	case F54_RAW_16BIT_IMAGE:
	case F54_TRUE_BASELINE:
	case F54_FULL_RAW_CAP:
	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
	case F54_SENSOR_SPEED:
	case F54_AMP_FULL_RAW_CAP:
	case F54_AMP_RAW_ADC:
	case F54_FULL_RAW_CAP_TDDI:
		/* Two bytes per sensing node. */
		f54->report_size = 2 * tx * rx;
		break;
	case F54_HIGH_RESISTANCE:
		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
		break;
	case F54_TX_TO_TX_SHORTS:
	case F54_TX_OPENS:
	case F54_TX_TO_GND_SHORTS:
		/* One bit per TX electrode, rounded up to whole bytes. */
		f54->report_size = (tx + 7) / 8;
		break;
	case F54_RX_TO_RX_SHORTS_1:
	case F54_RX_OPENS_1:
		if (rx < tx)
			f54->report_size = 2 * rx * rx;
		else
			f54->report_size = 2 * tx * rx;
		break;
	case F54_FULL_RAW_CAP_MIN_MAX:
		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
		break;
	case F54_RX_TO_RX_SHORTS_2:
	case F54_RX_OPENS_2:
		/* Only the RX rows beyond the TX count are reported. */
		if (rx <= tx)
			f54->report_size = 0;
		else
			f54->report_size = 2 * rx * (rx - tx);
		break;
	case F54_ADC_RANGE:
		if (f54->query.has_signal_clarity) {
			retval = synaptics_rmi4_reg_read(rmi4_data,
					f54->control.reg_41->address,
					f54->control.reg_41->data,
					sizeof(f54->control.reg_41->data));
			if (retval < 0) {
				dev_dbg(rmi4_data->pdev->dev.parent,
						"%s: Failed to read control reg_41\n",
						__func__);
				f54->report_size = 0;
				break;
			}
			/*
			 * With signal clarity enabled, the TX count is
			 * padded up to a multiple of 4.
			 */
			if (!f54->control.reg_41->no_signal_clarity) {
				if (tx % 4)
					tx += 4 - (tx % 4);
			}
		}
		f54->report_size = 2 * tx * rx;
		break;
	case F54_TRX_OPENS:
	case F54_TRX_TO_GND_SHORTS:
	case F54_TRX_SHORTS:
		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
		break;
	case F54_ABS_RAW_CAP:
	case F54_ABS_DELTA_CAP:
	case F54_ABS_HYBRID_DELTA_CAP:
	case F54_ABS_HYBRID_RAW_CAP:
		/*
		 * Absolute capacitance reports also cover the F21 force
		 * electrodes; four bytes per channel.
		 */
		tx += f21->tx_assigned;
		rx += f21->rx_assigned;
		f54->report_size = 4 * (tx + rx);
		break;
	default:
		f54->report_size = 0;
	}

	return;
}
+
/*
 * test_set_interrupt - route interrupt enables for report capture
 * @set: true to leave only the F54 interrupt source enabled,
 *       false to restore the normal interrupt masks
 *
 * With @set true, every F01 interrupt enable register is zeroed and
 * then only F54's mask is written back, so report completion is the
 * sole interrupt source. With @set false, F54's register is zeroed
 * first and the saved masks are restored for all registers.
 *
 * Returns 0 on success or a negative errno from the register writes.
 */
static int test_set_interrupt(bool set)
{
	int retval;
	unsigned char ii;
	unsigned char zero = 0x00;
	unsigned char *intr_mask;
	unsigned short f01_ctrl_reg;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	intr_mask = rmi4_data->intr_mask;
	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;

	if (!set) {
		/* Quiesce F54 before restoring the other sources. */
		retval = synaptics_rmi4_reg_write(rmi4_data,
				f01_ctrl_reg,
				&zero,
				sizeof(zero));
		if (retval < 0)
			return retval;
	}

	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
		if (intr_mask[ii] != 0x00) {
			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
			if (set) {
				retval = synaptics_rmi4_reg_write(rmi4_data,
						f01_ctrl_reg,
						&zero,
						sizeof(zero));
				if (retval < 0)
					return retval;
			} else {
				retval = synaptics_rmi4_reg_write(rmi4_data,
						f01_ctrl_reg,
						&(intr_mask[ii]),
						sizeof(intr_mask[ii]));
				if (retval < 0)
					return retval;
			}
		}
	}

	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;

	if (set) {
		/* Re-enable only the F54 interrupt source. */
		retval = synaptics_rmi4_reg_write(rmi4_data,
				f01_ctrl_reg,
				&f54->intr_mask,
				1);
		if (retval < 0)
			return retval;
	}

	return 0;
}
+
+static int test_wait_for_command_completion(void)
+{
+ int retval;
+ unsigned char value;
+ unsigned char timeout_count;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ timeout_count = 0;
+ do {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->command_base_addr,
+ &value,
+ sizeof(value));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read command register\n",
+ __func__);
+ return retval;
+ }
+
+ if (value == 0x00)
+ break;
+
+ msleep(100);
+ timeout_count++;
+ } while (timeout_count < COMMAND_TIMEOUT_100MS);
+
+ if (timeout_count == COMMAND_TIMEOUT_100MS) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Timed out waiting for command completion\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int test_do_command(unsigned char command)
+{
+ int retval;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ f54->command_base_addr,
+ &command,
+ sizeof(command));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write command\n",
+ __func__);
+ return retval;
+ }
+
+ retval = test_wait_for_command_completion();
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
/*
 * test_do_preparation - put the sensor into a known state before
 * capturing a report
 *
 * Forces no-sleep mode, then applies report-type-specific preparation:
 * most raw-capacitance types get CBC/signal-clarity disabled followed by
 * force-update and force-cal commands; AMP raw ADC sets the production
 * test bit in control 188; a handful of types need no preparation.
 *
 * Returns 0 on success or a negative errno on any register access
 * failure.
 */
static int test_do_preparation(void)
{
	int retval;
	unsigned char value;
	unsigned char zero = 0x00;
	unsigned char device_ctrl;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	/* Keep the controller awake for the duration of the test. */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			&device_ctrl,
			sizeof(device_ctrl));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to set no sleep\n",
				__func__);
		return retval;
	}

	device_ctrl |= NO_SLEEP_ON;

	retval = synaptics_rmi4_reg_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			&device_ctrl,
			sizeof(device_ctrl));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to set no sleep\n",
				__func__);
		return retval;
	}

	if (f54->skip_preparation)
		return 0;

	switch (f54->report_type) {
	case F54_16BIT_IMAGE:
	case F54_RAW_16BIT_IMAGE:
	case F54_SENSOR_SPEED:
	case F54_ADC_RANGE:
	case F54_ABS_RAW_CAP:
	case F54_ABS_DELTA_CAP:
	case F54_ABS_HYBRID_DELTA_CAP:
	case F54_ABS_HYBRID_RAW_CAP:
	case F54_FULL_RAW_CAP_TDDI:
		/* These report types need no extra preparation. */
		break;
	case F54_AMP_RAW_ADC:
		if (f54->query_49.has_ctrl188) {
			/* Set the start-production-test bit in ctrl 188. */
			retval = synaptics_rmi4_reg_read(rmi4_data,
					f54->control.reg_188->address,
					f54->control.reg_188->data,
					sizeof(f54->control.reg_188->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to set start production test\n",
						__func__);
				return retval;
			}
			f54->control.reg_188->start_production_test = 1;
			retval = synaptics_rmi4_reg_write(rmi4_data,
					f54->control.reg_188->address,
					f54->control.reg_188->data,
					sizeof(f54->control.reg_188->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to set start production test\n",
						__func__);
				return retval;
			}
		}
		break;
	default:
		/* Disable CBC (charge balancing) for raw readings. */
		if (f54->query.touch_controller_family == 1)
			disable_cbc(reg_7);
		else if (f54->query.has_ctrl88)
			disable_cbc(reg_88);

		if (f54->query.has_0d_acquisition_control)
			disable_cbc(reg_57);

		if ((f54->query.has_query15) &&
				(f54->query_15.has_query25) &&
				(f54->query_25.has_query27) &&
				(f54->query_27.has_query29) &&
				(f54->query_29.has_query30) &&
				(f54->query_30.has_query32) &&
				(f54->query_32.has_query33) &&
				(f54->query_33.has_query36) &&
				(f54->query_36.has_query38) &&
				(f54->query_38.has_ctrl149)) {
			/*
			 * NOTE(review): this writes sizeof(reg_149->data)
			 * bytes sourced from the single-byte 'zero'
			 * variable - only safe if reg_149 is one byte;
			 * confirm against the register definition.
			 */
			retval = synaptics_rmi4_reg_write(rmi4_data,
					f54->control.reg_149->address,
					&zero,
					sizeof(f54->control.reg_149->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to disable global CBC\n",
						__func__);
				return retval;
			}
		}

		if (f54->query.has_signal_clarity) {
			/*
			 * NOTE(review): reads sizeof(reg_41->data) bytes
			 * into the single-byte 'value' - only safe if
			 * reg_41 is one byte; confirm.
			 */
			retval = synaptics_rmi4_reg_read(rmi4_data,
					f54->control.reg_41->address,
					&value,
					sizeof(f54->control.reg_41->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to disable signal clarity\n",
						__func__);
				return retval;
			}
			value |= 0x01;
			retval = synaptics_rmi4_reg_write(rmi4_data,
					f54->control.reg_41->address,
					&value,
					sizeof(f54->control.reg_41->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to disable signal clarity\n",
						__func__);
				return retval;
			}
		}

		retval = test_do_command(COMMAND_FORCE_UPDATE);
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to do force update\n",
					__func__);
			return retval;
		}

		retval = test_do_command(COMMAND_FORCE_CAL);
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to do force cal\n",
					__func__);
			return retval;
		}
	}

	return 0;
}
+
+static int test_do_afe_calibration(enum f54_afe_cal mode)
+{
+ int retval;
+ unsigned char timeout = CALIBRATION_TIMEOUT_S;
+ unsigned char timeout_count = 0;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->control.reg_188->address,
+ f54->control.reg_188->data,
+ sizeof(f54->control.reg_188->data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to start calibration\n",
+ __func__);
+ return retval;
+ }
+
+ if (mode == F54_AFE_CAL)
+ f54->control.reg_188->start_calibration = 1;
+ else if (mode == F54_AFE_IS_CAL)
+ f54->control.reg_188->start_is_calibration = 1;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ f54->control.reg_188->address,
+ f54->control.reg_188->data,
+ sizeof(f54->control.reg_188->data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to start calibration\n",
+ __func__);
+ return retval;
+ }
+
+ do {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->control.reg_188->address,
+ f54->control.reg_188->data,
+ sizeof(f54->control.reg_188->data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to complete calibration\n",
+ __func__);
+ return retval;
+ }
+
+ if (mode == F54_AFE_CAL) {
+ if (!f54->control.reg_188->start_calibration)
+ break;
+ } else if (mode == F54_AFE_IS_CAL) {
+ if (!f54->control.reg_188->start_is_calibration)
+ break;
+ }
+
+ if (timeout_count == timeout) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Timed out waiting for calibration completion\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ timeout_count++;
+ msleep(1000);
+ } while (true);
+
+ /* check CRC */
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->data_31.address,
+ f54->data_31.data,
+ sizeof(f54->data_31.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read calibration CRC\n",
+ __func__);
+ return retval;
+ }
+
+ if (mode == F54_AFE_CAL) {
+ if (f54->data_31.calibration_crc == 0)
+ return 0;
+ } else if (mode == F54_AFE_IS_CAL) {
+ if (f54->data_31.is_calibration_crc == 0)
+ return 0;
+ }
+
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read calibration CRC\n",
+ __func__);
+
+ return -EINVAL;
+}
+
+static int test_check_for_idle_status(void)
+{
+ int retval;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ switch (f54->status) {
+ case STATUS_IDLE:
+ retval = 0;
+ break;
+ case STATUS_BUSY:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Status busy\n",
+ __func__);
+ retval = -EINVAL;
+ break;
+ case STATUS_ERROR:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Status error\n",
+ __func__);
+ retval = -EINVAL;
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Invalid status (%d)\n",
+ __func__, f54->status);
+ retval = -EINVAL;
+ }
+
+ return retval;
+}
+
/*
 * test_timeout_work - process-context handler for the report watchdog
 *
 * Runs after the get-report watchdog fires while a capture may still be
 * pending. If the command register cannot be read, or the GET_REPORT
 * bit is still set (firmware never produced the report), the state
 * machine moves to STATUS_ERROR. If the command completed, reading the
 * report is handed off to the report workqueue.
 */
static void test_timeout_work(struct work_struct *work)
{
	int retval;
	unsigned char command;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	mutex_lock(&f54->status_mutex);

	if (f54->status == STATUS_BUSY) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				f54->command_base_addr,
				&command,
				sizeof(command));
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read command register\n",
					__func__);
		} else if (command & COMMAND_GET_REPORT) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Report type not supported by FW\n",
					__func__);
		} else {
			/* Command completed; collect the report. */
			queue_work(f54->test_report_workqueue,
					&f54->test_report_work);
			goto exit;
		}
		f54->status = STATUS_ERROR;
		f54->report_size = 0;
	}

exit:
	mutex_unlock(&f54->status_mutex);

	return;
}
+
/*
 * Watchdog expiry callback: defer handling to process context via the
 * timeout work item (register access may sleep). One-shot timer.
 */
static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer)
{
	schedule_work(&(f54->timeout_work));

	return HRTIMER_NORESTART;
}
+
+static ssize_t test_sysfs_num_of_mapped_tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f54->tx_assigned);
+}
+
+static ssize_t test_sysfs_num_of_mapped_rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f54->rx_assigned);
+}
+
/*
 * sysfs: physical pin assignment of the 2D TX electrodes, one entry per
 * electrode; "xx" marks an unassigned pin (0xff). Requires F55.
 *
 * NOTE(review): snprintf() returns the would-be length on truncation,
 * so for very large electrode counts 'count' could exceed the space
 * actually written; scnprintf() would be the safer kernel idiom.
 */
static ssize_t test_sysfs_tx_mapping_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cnt;
	int count = 0;
	unsigned char ii;
	unsigned char tx_num;
	unsigned char tx_electrodes;

	if (!f55)
		return -EINVAL;

	tx_electrodes = f55->query.num_of_tx_electrodes;

	for (ii = 0; ii < tx_electrodes; ii++) {
		tx_num = f55->tx_assignment[ii];
		if (tx_num == 0xff)
			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
		else
			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
		buf += cnt;
		count += cnt;
	}

	snprintf(buf, PAGE_SIZE - count, "\n");
	count++;

	return count;
}
+
/*
 * sysfs: physical pin assignment of the 2D RX electrodes, one entry per
 * electrode; "xx" marks an unassigned pin (0xff). Requires F55.
 */
static ssize_t test_sysfs_rx_mapping_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cnt;
	int count = 0;
	unsigned char ii;
	unsigned char rx_num;
	unsigned char rx_electrodes;

	if (!f55)
		return -EINVAL;

	rx_electrodes = f55->query.num_of_rx_electrodes;

	for (ii = 0; ii < rx_electrodes; ii++) {
		rx_num = f55->rx_assignment[ii];
		if (rx_num == 0xff)
			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
		else
			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
		buf += cnt;
		count += cnt;
	}

	snprintf(buf, PAGE_SIZE - count, "\n");
	count++;

	return count;
}
+
+static ssize_t test_sysfs_num_of_mapped_force_tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f21->tx_assigned);
+}
+
+static ssize_t test_sysfs_num_of_mapped_force_rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f21->rx_assigned);
+}
+
+static ssize_t test_sysfs_force_tx_mapping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cnt;
+ int count = 0;
+ unsigned char ii;
+ unsigned char tx_num;
+ unsigned char tx_electrodes;
+
+ if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+ return -EINVAL;
+
+ if (f55->has_force) {
+ tx_electrodes = f55->query.num_of_tx_electrodes;
+
+ for (ii = 0; ii < tx_electrodes; ii++) {
+ tx_num = f55->force_tx_assignment[ii];
+ if (tx_num == 0xff) {
+ cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+ } else {
+ cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+ tx_num);
+ }
+ buf += cnt;
+ count += cnt;
+ }
+ } else if (f21->has_force) {
+ tx_electrodes = f21->max_num_of_tx;
+
+ for (ii = 0; ii < tx_electrodes; ii++) {
+ tx_num = f21->force_txrx_assignment[ii];
+ if (tx_num == 0xff) {
+ cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+ } else {
+ cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+ tx_num);
+ }
+ buf += cnt;
+ count += cnt;
+ }
+ }
+
+ snprintf(buf, PAGE_SIZE - count, "\n");
+ count++;
+
+ return count;
+}
+
+static ssize_t test_sysfs_force_rx_mapping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cnt;
+ int count = 0;
+ unsigned char ii;
+ unsigned char offset;
+ unsigned char rx_num;
+ unsigned char rx_electrodes;
+
+ if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+ return -EINVAL;
+
+ if (f55->has_force) {
+ rx_electrodes = f55->query.num_of_rx_electrodes;
+
+ for (ii = 0; ii < rx_electrodes; ii++) {
+ rx_num = f55->force_rx_assignment[ii];
+ if (rx_num == 0xff)
+ cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+ else
+ cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+ rx_num);
+ buf += cnt;
+ count += cnt;
+ }
+ } else if (f21->has_force) {
+ offset = f21->max_num_of_tx;
+ rx_electrodes = f21->max_num_of_rx;
+
+ for (ii = offset; ii < (rx_electrodes + offset); ii++) {
+ rx_num = f21->force_txrx_assignment[ii];
+ if (rx_num == 0xff)
+ cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+ else
+ cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+ rx_num);
+ buf += cnt;
+ count += cnt;
+ }
+ }
+
+ snprintf(buf, PAGE_SIZE - count, "\n");
+ count++;
+
+ return count;
+}
+
+static ssize_t test_sysfs_report_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_size);
+}
+
+static ssize_t test_sysfs_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+
+ mutex_lock(&f54->status_mutex);
+
+ retval = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+
+ mutex_unlock(&f54->status_mutex);
+
+ return retval;
+}
+
/*
 * sysfs: writing "1" runs the report-capture preparation sequence
 * (requires the state machine to be idle).
 */
static ssize_t test_sysfs_do_preparation_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned long setting;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	retval = sstrtoul(buf, 10, &setting);
	if (retval)
		return retval;

	if (setting != 1)
		return -EINVAL;

	mutex_lock(&f54->status_mutex);

	retval = test_check_for_idle_status();
	if (retval < 0)
		goto exit;

	retval = test_do_preparation();
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to do preparation\n",
				__func__);
		goto exit;
	}

	retval = count;

exit:
	mutex_unlock(&f54->status_mutex);

	return retval;
}
+
+static ssize_t test_sysfs_force_cal_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long setting;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &setting);
+ if (retval)
+ return retval;
+
+ if (setting != 1)
+ return -EINVAL;
+
+ mutex_lock(&f54->status_mutex);
+
+ retval = test_check_for_idle_status();
+ if (retval < 0)
+ goto exit;
+
+ retval = test_do_command(COMMAND_FORCE_CAL);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to do force cal\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = count;
+
+exit:
+ mutex_unlock(&f54->status_mutex);
+
+ return retval;
+}
+
/*
 * sysfs: writing "1" triggers capture of the currently selected report.
 *
 * Routes interrupts to F54 only, writes the GET_REPORT command, marks
 * the state machine busy, and arms the watchdog timer that recovers if
 * the firmware never delivers the report.
 *
 * NOTE(review): the return value of test_set_interrupt(true) is not
 * checked; a failed interrupt setup proceeds silently.
 */
static ssize_t test_sysfs_get_report_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned char command;
	unsigned long setting;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	retval = sstrtoul(buf, 10, &setting);
	if (retval)
		return retval;

	if (setting != 1)
		return -EINVAL;

	mutex_lock(&f54->status_mutex);

	retval = test_check_for_idle_status();
	if (retval < 0)
		goto exit;

	if (!test_report_type_valid(f54->report_type)) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Invalid report type\n",
				__func__);
		retval = -EINVAL;
		goto exit;
	}

	test_set_interrupt(true);

	command = (unsigned char)COMMAND_GET_REPORT;

	retval = synaptics_rmi4_reg_write(rmi4_data,
			f54->command_base_addr,
			&command,
			sizeof(command));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to write get report command\n",
				__func__);
		goto exit;
	}

	f54->status = STATUS_BUSY;
	f54->report_size = 0;
	f54->data_pos = 0;

	/* Arm the watchdog that fires if the report never arrives. */
	hrtimer_start(&f54->watchdog,
			ktime_set(GET_REPORT_TIMEOUT_S, 0),
			HRTIMER_MODE_REL);

	retval = count;

exit:
	mutex_unlock(&f54->status_mutex);

	return retval;
}
+
/*
 * sysfs: writing "1" restores normal touch operation after testing.
 *
 * Restores the saved no-sleep setting, re-enables the regular interrupt
 * masks, and undoes report-type-specific preparation: AMP raw ADC
 * clears the production test bit; most other prepared types get a soft
 * reset of the device.
 */
static ssize_t test_sysfs_resume_touch_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned char device_ctrl;
	unsigned long setting;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	retval = sstrtoul(buf, 10, &setting);
	if (retval)
		return retval;

	if (setting != 1)
		return -EINVAL;

	/* Restore the pre-test no-sleep configuration. */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			&device_ctrl,
			sizeof(device_ctrl));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to restore no sleep setting\n",
				__func__);
		return retval;
	}

	device_ctrl = device_ctrl & ~NO_SLEEP_ON;
	device_ctrl |= rmi4_data->no_sleep_setting;

	retval = synaptics_rmi4_reg_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			&device_ctrl,
			sizeof(device_ctrl));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to restore no sleep setting\n",
				__func__);
		return retval;
	}

	test_set_interrupt(false);

	if (f54->skip_preparation)
		return count;

	switch (f54->report_type) {
	case F54_16BIT_IMAGE:
	case F54_RAW_16BIT_IMAGE:
	case F54_SENSOR_SPEED:
	case F54_ADC_RANGE:
	case F54_ABS_RAW_CAP:
	case F54_ABS_DELTA_CAP:
	case F54_ABS_HYBRID_DELTA_CAP:
	case F54_ABS_HYBRID_RAW_CAP:
	case F54_FULL_RAW_CAP_TDDI:
		/* No preparation was done; nothing to undo. */
		break;
	case F54_AMP_RAW_ADC:
		if (f54->query_49.has_ctrl188) {
			/* Clear the start-production-test bit. */
			retval = synaptics_rmi4_reg_read(rmi4_data,
					f54->control.reg_188->address,
					f54->control.reg_188->data,
					sizeof(f54->control.reg_188->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to set start production test\n",
						__func__);
				return retval;
			}
			f54->control.reg_188->start_production_test = 0;
			retval = synaptics_rmi4_reg_write(rmi4_data,
					f54->control.reg_188->address,
					f54->control.reg_188->data,
					sizeof(f54->control.reg_188->data));
			if (retval < 0) {
				dev_err(rmi4_data->pdev->dev.parent,
						"%s: Failed to set start production test\n",
						__func__);
				return retval;
			}
		}
		break;
	default:
		/* Preparation changed CBC/clarity; reset to recover. */
		rmi4_data->reset_device(rmi4_data, false);
	}

	return count;
}
+
+static ssize_t test_sysfs_do_afe_calibration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long setting;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &setting);
+ if (retval)
+ return retval;
+
+ if (!f54->query_49.has_ctrl188) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: F54_ANALOG_Ctrl188 not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (setting == 0 || setting == 1)
+ retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+ else
+ return -EINVAL;
+
+ if (retval)
+ return retval;
+ else
+ return count;
+}
+
+static ssize_t test_sysfs_report_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_type);
+}
+
/*
 * sysfs: select the F54 report type. Validates the type, caches it in
 * the handle, and writes it to the F54 data register (requires the
 * state machine to be idle).
 */
static ssize_t test_sysfs_report_type_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned char data;
	unsigned long setting;
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	retval = sstrtoul(buf, 10, &setting);
	if (retval)
		return retval;

	mutex_lock(&f54->status_mutex);

	retval = test_check_for_idle_status();
	if (retval < 0)
		goto exit;

	if (!test_report_type_valid((enum f54_report_types)setting)) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Report type not supported by driver\n",
				__func__);
		retval = -EINVAL;
		goto exit;
	}

	f54->report_type = (enum f54_report_types)setting;
	data = (unsigned char)setting;
	retval = synaptics_rmi4_reg_write(rmi4_data,
			f54->data_base_addr,
			&data,
			sizeof(data));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to write report type\n",
				__func__);
		goto exit;
	}

	retval = count;

exit:
	mutex_unlock(&f54->status_mutex);

	return retval;
}
+
/*
 * sysfs: read back the report data FIFO index from the firmware
 * (two-byte value assembled via batohs()).
 */
static ssize_t test_sysfs_fifoindex_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int retval;
	unsigned char data[2];
	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			f54->data_base_addr + REPORT_INDEX_OFFSET,
			data,
			sizeof(data));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to read report index\n",
				__func__);
		return retval;
	}

	batohs(&f54->fifoindex, data);

	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
}
+
+static ssize_t test_sysfs_fifoindex_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned char data[2];
+ unsigned long setting;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &setting);
+ if (retval)
+ return retval;
+
+ f54->fifoindex = setting;
+
+ hstoba(data, (unsigned short)setting);
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ f54->data_base_addr + REPORT_INDEX_OFFSET,
+ data,
+ sizeof(data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write report index\n",
+ __func__);
+ return retval;
+ }
+
+ return count;
+}
+
/* sysfs: whether automatic calibration is currently disabled. */
static ssize_t test_sysfs_no_auto_cal_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", f54->no_auto_cal);
}
+
+static ssize_t test_sysfs_no_auto_cal_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned char data;
+ unsigned long setting;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &setting);
+ if (retval)
+ return retval;
+
+ if (setting > 1)
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->control_base_addr,
+ &data,
+ sizeof(data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read no auto cal setting\n",
+ __func__);
+ return retval;
+ }
+
+ if (setting)
+ data |= CONTROL_NO_AUTO_CAL;
+ else
+ data &= ~CONTROL_NO_AUTO_CAL;
+
+ retval = synaptics_rmi4_reg_write(rmi4_data,
+ f54->control_base_addr,
+ &data,
+ sizeof(data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to write no auto cal setting\n",
+ __func__);
+ return retval;
+ }
+
+ f54->no_auto_cal = (setting == 1);
+
+ return count;
+}
+
+/*
+ * Sysfs show handler that formats the last captured report for reading.
+ *
+ * The layout depends on the report type: 2-D image types print a
+ * tx-by-rx matrix, absolute capacitance types print rx and tx vectors,
+ * and everything else falls back to one hex byte per line.
+ *
+ * Uses scnprintf() instead of snprintf(): snprintf() returns the number
+ * of bytes that WOULD have been written, so once the output is truncated
+ * advancing buf/count by its return value walks past the PAGE_SIZE sysfs
+ * buffer.  scnprintf() returns the bytes actually written, keeping buf
+ * and count in bounds for arbitrarily large reports.
+ */
+static ssize_t test_sysfs_read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = scnprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		/* Matrix layout: one row per tx, space-separated rx values. */
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = scnprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = scnprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		report_data_16 = (short *)f54->report_data;
+		cnt = scnprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = scnprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = scnprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		/* Report is a flat list of 16-bit values (2 bytes each). */
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* Hybrid reports append the F21 electrodes to the F54 ones. */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_u32 = (unsigned int *)f54->report_data;
+		cnt = scnprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+		/* Same layout as the raw-cap case, but values are signed. */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_32 = (int *)f54->report_data;
+		cnt = scnprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = scnprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = scnprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	/* Trailing blank line; count only what actually fit. */
+	cnt = scnprintf(buf, PAGE_SIZE - count, "\n");
+	count += cnt;
+
+	return count;
+}
+
+/*
+ * One-shot sysfs handler that runs a complete report capture by chaining
+ * the individual sysfs handlers: set the report type from buf, run the
+ * preparation step, trigger the get_report command, poll for completion,
+ * then restore normal touch operation.  On any failure the controller is
+ * reset to get it out of test-reporting mode.
+ *
+ * Returns the consumed byte count on success or a negative error code.
+ */
+static ssize_t test_sysfs_read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char timeout_count;
+	const char cmd[] = {'1', 0};
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = test_sysfs_report_type_store(dev, attr, buf, count);
+	if (retval < 0)
+		goto exit;
+
+	/* "1" is the enable value expected by the chained store handlers. */
+	retval = test_sysfs_do_preparation_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_get_report_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	/* Poll in 100 ms steps, up to GET_REPORT_TIMEOUT_S seconds total. */
+	timeout_count = 0;
+	do {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < timeout);
+
+	/* Timed out, errored, or completed with no data - all failures. */
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = test_sysfs_resume_touch_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return retval;
+}
+
+/*
+ * Bin-file read handler exposing the captured raw report data.
+ *
+ * Supports partial, sequential reads: f54->data_pos tracks how far the
+ * reader has consumed the report and advances with every call.  The copy
+ * is clamped so it never runs past the end of the report.  Serialized
+ * against the capture work by status_mutex.
+ *
+ * Returns the number of bytes copied, or a negative error code.
+ */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	/* A capture must not be in flight while its buffer is read out. */
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Clamp the copy to the remaining, not-yet-read report bytes. */
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * Deferred work that completes a report capture started by the
+ * get_report sysfs handler: wait for the F54 command to finish, compute
+ * the report size, grow the report buffer if needed, rewind the report
+ * index registers, and read the report data out of the controller.
+ *
+ * f54->status is left at STATUS_IDLE on success or STATUS_ERROR on
+ * failure; pollers (e.g. test_sysfs_read_report_store) key off it.
+ */
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	/* Only run if a capture was actually started (status is BUSY). */
+	if (f54->status != STATUS_BUSY) {
+		retval = f54->status;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	/* Grow (never shrink) the data buffer to fit this report. */
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	/* Rewind the 16-bit report index so the read starts at offset 0. */
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	/* Readers treat report_size as valid only when status != ERROR. */
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+
+	return;
+}
+
+/*
+ * Tear down the sysfs interface created by test_set_sysfs(): the
+ * attribute group, the report-data bin file, and finally the directory
+ * kobject itself.
+ */
+static void test_remove_sysfs(void)
+{
+	struct kobject *sysfs_dir = f54->sysfs_dir;
+
+	sysfs_remove_group(sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(sysfs_dir, &test_report_data);
+	kobject_put(sysfs_dir);
+}
+
+/*
+ * Create the test-reporting sysfs interface under the input device: a
+ * directory kobject, a bin file for raw report data, and the attribute
+ * group.  Returns 0 on success or -ENODEV on failure.
+ *
+ * Error unwinding only removes what was actually created.  The original
+ * code also called sysfs_remove_group() on the exit_attributes path,
+ * but that label is reached precisely because sysfs_create_group()
+ * failed, so the group never existed and removing it triggers a sysfs
+ * warning.
+ */
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+exit_attributes:
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+/*
+ * Release every optionally allocated F54 control register struct.
+ * Registers absent on this controller were never allocated and are
+ * NULL; kfree(NULL) is a no-op, so no guards are needed.
+ */
+static void test_free_control_mem(void)
+{
+	kfree(f54->control.reg_7);
+	kfree(f54->control.reg_41);
+	kfree(f54->control.reg_57);
+	kfree(f54->control.reg_86);
+	kfree(f54->control.reg_88);
+	kfree(f54->control.reg_110);
+	kfree(f54->control.reg_149);
+	kfree(f54->control.reg_188);
+}
+
+/*
+ * Walk the F54 data register map and compute the address of data
+ * register 31 (stored in f54->data_31.address when present).
+ *
+ * Optional data registers only exist when the corresponding query bit
+ * is set, so reg_addr is advanced by each register's size in strict
+ * register order.  Do not reorder these checks.
+ *
+ * NOTE(review): the single '|' (bitwise OR) on the query bitfields
+ * below acts like '||' since the operands are 0/1 bitfields -
+ * presumably intentional; confirm before "fixing".
+ */
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	/* Start past data 0-3, which are always present. */
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data_20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31 */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+
+	return;
+}
+
+/*
+ * Walk the F54 control register space and record the addresses of the
+ * control registers this driver accesses directly (7, 41, 57, 86, 88,
+ * 110, 149, 188), allocating a struct for each one that is present.
+ *
+ * The walk mirrors the controller's register map: each optional control
+ * register only exists when the corresponding query bit is set, so the
+ * running reg_addr must be advanced by every register's size in strict
+ * register order.  Do not reorder these checks.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or a negative
+ * error code if reading control 86 fails.  On failure, any structs
+ * already allocated here are left for the caller to release via
+ * test_free_control_mem() - TODO confirm the caller does so.
+ */
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7 */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16 */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19 */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36 */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		/* Mode 1 spans max(rx, tx) entries, mode 2 spans rx only. */
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40 */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41 */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57 */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank)) {
+		/* TDDI HIC parts expose only controls 70-73 of this range. */
+		if (f54->query_68.is_tddi_hic)
+			reg_addr += CONTROL_70_73_SIZE;
+		else
+			reg_addr += CONTROL_68_73_SIZE;
+	}
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75 */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86 */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		/* Control 86 is also read here to cache its current value. */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88 */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103 */
+	if (f54->query_22.has_ctrl103_query26) {
+		/* Parts with ctrl 103 do their own preparation in firmware. */
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110 */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 */
+	if (f54->query_30.has_ctrl124)
+		reg_addr += CONTROL_124_SIZE;
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* control 128 */
+	if (f54->query_33.has_ctrl128)
+		reg_addr += CONTROL_128_SIZE;
+
+	/* control 129 */
+	if (f54->query_33.has_ctrl129)
+		reg_addr += CONTROL_129_SIZE;
+
+	/* control 130 */
+	if (f54->query_33.has_ctrl130)
+		reg_addr += CONTROL_130_SIZE;
+
+	/* control 131 */
+	if (f54->query_33.has_ctrl131)
+		reg_addr += CONTROL_131_SIZE;
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* control 135 */
+	if (f54->query_35.has_ctrl135)
+		reg_addr += CONTROL_135_SIZE;
+
+	/* control 136 */
+	if (f54->query_35.has_ctrl136)
+		reg_addr += CONTROL_136_SIZE;
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 */
+	if (f54->query_36.has_ctrl141)
+		reg_addr += CONTROL_141_SIZE;
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149 */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* control 150 */
+	if (f54->query_38.has_ctrl150)
+		reg_addr += CONTROL_150_SIZE;
+
+	/* control 151 */
+	if (f54->query_38.has_ctrl151)
+		reg_addr += CONTROL_151_SIZE;
+
+	/* control 152 */
+	if (f54->query_38.has_ctrl152)
+		reg_addr += CONTROL_152_SIZE;
+
+	/* control 153 */
+	if (f54->query_38.has_ctrl153)
+		reg_addr += CONTROL_153_SIZE;
+
+	/* control 154 */
+	if (f54->query_39.has_ctrl154)
+		reg_addr += CONTROL_154_SIZE;
+
+	/* control 155 */
+	if (f54->query_39.has_ctrl155)
+		reg_addr += CONTROL_155_SIZE;
+
+	/* control 156 */
+	if (f54->query_39.has_ctrl156)
+		reg_addr += CONTROL_156_SIZE;
+
+	/* controls 157 158 */
+	if (f54->query_39.has_ctrl157_ctrl158)
+		reg_addr += CONTROL_157_158_SIZE;
+
+	/* controls 159 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 */
+	if (f54->query_40.has_ctrl166)
+		reg_addr += CONTROL_166_SIZE;
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* control 168 */
+	if (f54->query_40.has_ctrl168)
+		reg_addr += CONTROL_168_SIZE;
+
+	/* control 169 */
+	if (f54->query_40.has_ctrl169)
+		reg_addr += CONTROL_169_SIZE;
+
+	/* control 170 reserved */
+
+	/* control 171 */
+	if (f54->query_43.has_ctrl171)
+		reg_addr += CONTROL_171_SIZE;
+
+	/* control 172 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		reg_addr += CONTROL_172_SIZE;
+
+	/* control 173 */
+	if (f54->query_43.has_ctrl173)
+		reg_addr += CONTROL_173_SIZE;
+
+	/* control 174 */
+	if (f54->query_43.has_ctrl174)
+		reg_addr += CONTROL_174_SIZE;
+
+	/* control 175 */
+	if (f54->query_43.has_ctrl175)
+		reg_addr += CONTROL_175_SIZE;
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 */
+	if (f54->query_46.has_ctrl177_ctrl178)
+		reg_addr += CONTROL_177_178_SIZE;
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 181 reserved */
+
+	/* control 182 */
+	if (f54->query_47.has_ctrl182)
+		reg_addr += CONTROL_182_SIZE;
+
+	/* control 183 */
+	if (f54->query_47.has_ctrl183)
+		reg_addr += CONTROL_183_SIZE;
+
+	/* control 184 reserved */
+
+	/* control 185 */
+	if (f54->query_47.has_ctrl185)
+		reg_addr += CONTROL_185_SIZE;
+
+	/* control 186 */
+	if (f54->query_47.has_ctrl186)
+		reg_addr += CONTROL_186_SIZE;
+
+	/* control 187 */
+	if (f54->query_47.has_ctrl187)
+		reg_addr += CONTROL_187_SIZE;
+
+	/* control 188 */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
+static int test_set_queries(void)
+{
+ int retval;
+ unsigned char offset;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr,
+ f54->query.data,
+ sizeof(f54->query.data));
+ if (retval < 0)
+ return retval;
+
+ offset = sizeof(f54->query.data);
+
+ /* query 12 */
+ if (f54->query.has_sense_frequency_control == 0)
+ offset -= 1;
+
+ /* query 13 */
+ if (f54->query.has_query13) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_13.data,
+ sizeof(f54->query_13.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 14 */
+ if (f54->query_13.has_ctrl87)
+ offset += 1;
+
+ /* query 15 */
+ if (f54->query.has_query15) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_15.data,
+ sizeof(f54->query_15.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 16 */
+ if (f54->query_15.has_query16) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_16.data,
+ sizeof(f54->query_16.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 17 */
+ if (f54->query_16.has_query17)
+ offset += 1;
+
+ /* query 18 */
+ if (f54->query_16.has_ctrl94_query18)
+ offset += 1;
+
+ /* query 19 */
+ if (f54->query_16.has_ctrl95_query19)
+ offset += 1;
+
+ /* query 20 */
+ if (f54->query_15.has_query20)
+ offset += 1;
+
+ /* query 21 */
+ if (f54->query_15.has_query21) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_21.data,
+ sizeof(f54->query_21.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 22 */
+ if (f54->query_15.has_query22) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_22.data,
+ sizeof(f54->query_22.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 23 */
+ if (f54->query_22.has_query23) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_23.data,
+ sizeof(f54->query_23.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 24 */
+ if (f54->query_21.has_query24_data18)
+ offset += 1;
+
+ /* query 25 */
+ if (f54->query_15.has_query25) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_25.data,
+ sizeof(f54->query_25.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 26 */
+ if (f54->query_22.has_ctrl103_query26)
+ offset += 1;
+
+ /* query 27 */
+ if (f54->query_25.has_query27) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_27.data,
+ sizeof(f54->query_27.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 28 */
+ if (f54->query_22.has_query28)
+ offset += 1;
+
+ /* query 29 */
+ if (f54->query_27.has_query29) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_29.data,
+ sizeof(f54->query_29.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 30 */
+ if (f54->query_29.has_query30) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_30.data,
+ sizeof(f54->query_30.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 31 */
+ if (f54->query_30.has_ctrl122_query31)
+ offset += 1;
+
+ /* query 32 */
+ if (f54->query_30.has_query32) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_32.data,
+ sizeof(f54->query_32.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 33 */
+ if (f54->query_32.has_query33) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_33.data,
+ sizeof(f54->query_33.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 34 */
+ if (f54->query_32.has_query34)
+ offset += 1;
+
+ /* query 35 */
+ if (f54->query_32.has_query35) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_35.data,
+ sizeof(f54->query_35.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 36 */
+ if (f54->query_33.has_query36) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_36.data,
+ sizeof(f54->query_36.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 37 */
+ if (f54->query_36.has_query37)
+ offset += 1;
+
+ /* query 38 */
+ if (f54->query_36.has_query38) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_38.data,
+ sizeof(f54->query_38.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 39 */
+ if (f54->query_38.has_query39) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_39.data,
+ sizeof(f54->query_39.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 40 */
+ if (f54->query_39.has_query40) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_40.data,
+ sizeof(f54->query_40.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 41 */
+ if (f54->query_40.has_ctrl163_query41)
+ offset += 1;
+
+ /* query 42 */
+ if (f54->query_40.has_ctrl165_query42)
+ offset += 1;
+
+ /* query 43 */
+ if (f54->query_40.has_query43) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_43.data,
+ sizeof(f54->query_43.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ if (f54->query_43.has_ctrl172_query44_query45)
+ offset += 2;
+
+ /* query 46 */
+ if (f54->query_43.has_query46) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_46.data,
+ sizeof(f54->query_46.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 47 */
+ if (f54->query_46.has_query47) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_47.data,
+ sizeof(f54->query_47.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 48 reserved */
+
+ /* query 49 */
+ if (f54->query_47.has_query49) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_49.data,
+ sizeof(f54->query_49.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 50 */
+ if (f54->query_49.has_query50) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_50.data,
+ sizeof(f54->query_50.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 51 */
+ if (f54->query_50.has_query51) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_51.data,
+ sizeof(f54->query_51.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 53 54 */
+ if (f54->query_51.has_query53_query54_ctrl198)
+ offset += 2;
+
+ /* query 55 */
+ if (f54->query_51.has_query55) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_55.data,
+ sizeof(f54->query_55.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 56 */
+ if (f54->query_55.has_query56)
+ offset += 1;
+
+ /* query 57 */
+ if (f54->query_55.has_query57) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_57.data,
+ sizeof(f54->query_57.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 58 */
+ if (f54->query_57.has_query58) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_58.data,
+ sizeof(f54->query_58.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 59 */
+ if (f54->query_58.has_query59)
+ offset += 1;
+
+ /* query 60 */
+ if (f54->query_58.has_query60)
+ offset += 1;
+
+ /* query 61 */
+ if (f54->query_58.has_query61) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_61.data,
+ sizeof(f54->query_61.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 62 63 */
+ if (f54->query_61.has_ctrl215_query62_query63)
+ offset += 2;
+
+ /* query 64 */
+ if (f54->query_61.has_query64) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_64.data,
+ sizeof(f54->query_64.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 65 */
+ if (f54->query_64.has_query65) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_65.data,
+ sizeof(f54->query_65.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 66 */
+ if (f54->query_65.has_query66_ctrl231)
+ offset += 1;
+
+ /* query 67 */
+ if (f54->query_65.has_query67) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_67.data,
+ sizeof(f54->query_67.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 68 */
+ if (f54->query_67.has_query68) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_68.data,
+ sizeof(f54->query_68.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+	/* query 69 */
+ if (f54->query_68.has_query69) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f54->query_base_addr + offset,
+ f54->query_69.data,
+ sizeof(f54->query_69.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ return 0;
+}
+
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count,
+ unsigned char page)
+{
+ unsigned char ii;
+ unsigned char intr_offset;
+
+ f54->query_base_addr = fd->query_base_addr | (page << 8);
+ f54->control_base_addr = fd->ctrl_base_addr | (page << 8);
+ f54->data_base_addr = fd->data_base_addr | (page << 8);
+ f54->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+ f54->intr_reg_num = (intr_count + 7) / 8;
+ if (f54->intr_reg_num != 0)
+ f54->intr_reg_num -= 1;
+
+ f54->intr_mask = 0;
+ intr_offset = intr_count % 8;
+ for (ii = intr_offset;
+ ii < (fd->intr_src_count + intr_offset);
+ ii++) {
+ f54->intr_mask |= 1 << ii;
+ }
+
+ return;
+}
+
+static int test_f55_set_controls(void)
+{
+ unsigned char offset = 0;
+
+ /* controls 0 1 2 */
+ if (f55->query.has_sensor_assignment)
+ offset += 3;
+
+ /* control 3 */
+ if (f55->query.has_edge_compensation)
+ offset++;
+
+ /* control 4 */
+ if (f55->query.curve_compensation_mode == 0x1 ||
+ f55->query.curve_compensation_mode == 0x2)
+ offset++;
+
+ /* control 5 */
+ if (f55->query.curve_compensation_mode == 0x2)
+ offset++;
+
+ /* control 6 */
+ if (f55->query.has_ctrl6)
+ offset++;
+
+ /* control 7 */
+ if (f55->query.has_alternate_transmitter_assignment)
+ offset++;
+
+ /* control 8 */
+ if (f55->query_3.has_ctrl8)
+ offset++;
+
+ /* control 9 */
+ if (f55->query_3.has_ctrl9)
+ offset++;
+
+ /* control 10 */
+ if (f55->query_5.has_corner_compensation)
+ offset++;
+
+ /* control 11 */
+ if (f55->query.curve_compensation_mode == 0x3)
+ offset++;
+
+ /* control 12 */
+ if (f55->query_5.has_ctrl12)
+ offset++;
+
+ /* control 13 */
+ if (f55->query_5.has_ctrl13)
+ offset++;
+
+ /* control 14 */
+ if (f55->query_5.has_ctrl14)
+ offset++;
+
+ /* control 15 */
+ if (f55->query_5.has_basis_function)
+ offset++;
+
+ /* control 16 */
+ if (f55->query_17.has_ctrl16)
+ offset++;
+
+ /* control 17 */
+ if (f55->query_17.has_ctrl17)
+ offset++;
+
+ /* controls 18 19 */
+ if (f55->query_17.has_ctrl18_ctrl19)
+ offset += 2;
+
+ /* control 20 */
+ if (f55->query_17.has_ctrl20)
+ offset++;
+
+ /* control 21 */
+ if (f55->query_17.has_ctrl21)
+ offset++;
+
+ /* control 22 */
+ if (f55->query_17.has_ctrl22)
+ offset++;
+
+ /* control 23 */
+ if (f55->query_18.has_ctrl23)
+ offset++;
+
+ /* control 24 */
+ if (f55->query_18.has_ctrl24)
+ offset++;
+
+ /* control 25 */
+ if (f55->query_18.has_ctrl25)
+ offset++;
+
+ /* control 26 */
+ if (f55->query_18.has_ctrl26)
+ offset++;
+
+ /* control 27 */
+ if (f55->query_18.has_ctrl27_query20)
+ offset++;
+
+ /* control 28 */
+ if (f55->query_18.has_ctrl28_query21)
+ offset++;
+
+ /* control 29 */
+ if (f55->query_22.has_ctrl29)
+ offset++;
+
+ /* control 30 */
+ if (f55->query_22.has_ctrl30)
+ offset++;
+
+ /* control 31 */
+ if (f55->query_22.has_ctrl31)
+ offset++;
+
+ /* control 32 */
+ if (f55->query_22.has_ctrl32)
+ offset++;
+
+ /* controls 33 34 35 36 reserved */
+
+ /* control 37 */
+ if (f55->query_28.has_ctrl37)
+ offset++;
+
+ /* control 38 */
+ if (f55->query_30.has_ctrl38)
+ offset++;
+
+ /* control 39 */
+ if (f55->query_30.has_ctrl39)
+ offset++;
+
+ /* control 40 */
+ if (f55->query_30.has_ctrl40)
+ offset++;
+
+ /* control 41 */
+ if (f55->query_30.has_ctrl41)
+ offset++;
+
+ /* control 42 */
+ if (f55->query_30.has_ctrl42)
+ offset++;
+
+ /* controls 43 44 */
+ if (f55->query_30.has_ctrl43_ctrl44) {
+ f55->afe_mux_offset = offset;
+ offset += 2;
+ }
+
+ /* controls 45 46 */
+ if (f55->query_33.has_ctrl45_ctrl46) {
+ f55->has_force = true;
+ f55->force_tx_offset = offset;
+ f55->force_rx_offset = offset + 1;
+ offset += 2;
+ }
+
+ return 0;
+}
+
+static int test_f55_set_queries(void)
+{
+ int retval;
+ unsigned char offset;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr,
+ f55->query.data,
+ sizeof(f55->query.data));
+ if (retval < 0)
+ return retval;
+
+ offset = sizeof(f55->query.data);
+
+ /* query 3 */
+ if (f55->query.has_single_layer_multi_touch) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_3.data,
+ sizeof(f55->query_3.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 4 */
+ if (f55->query_3.has_ctrl9)
+ offset += 1;
+
+ /* query 5 */
+ if (f55->query.has_query5) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_5.data,
+ sizeof(f55->query_5.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* queries 6 7 */
+ if (f55->query.curve_compensation_mode == 0x3)
+ offset += 2;
+
+ /* query 8 */
+ if (f55->query_3.has_ctrl8)
+ offset += 1;
+
+ /* query 9 */
+ if (f55->query_3.has_query9)
+ offset += 1;
+
+ /* queries 10 11 12 13 14 15 16 */
+ if (f55->query_5.has_basis_function)
+ offset += 7;
+
+ /* query 17 */
+ if (f55->query_5.has_query17) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_17.data,
+ sizeof(f55->query_17.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 18 */
+ if (f55->query_17.has_query18) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_18.data,
+ sizeof(f55->query_18.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 19 */
+ if (f55->query_18.has_query19)
+ offset += 1;
+
+ /* query 20 */
+ if (f55->query_18.has_ctrl27_query20)
+ offset += 1;
+
+ /* query 21 */
+ if (f55->query_18.has_ctrl28_query21)
+ offset += 1;
+
+ /* query 22 */
+ if (f55->query_18.has_query22) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_22.data,
+ sizeof(f55->query_22.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 23 */
+ if (f55->query_22.has_query23) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_23.data,
+ sizeof(f55->query_23.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+
+ f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+ f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+ }
+
+ /* queries 24 25 26 27 reserved */
+
+ /* query 28 */
+ if (f55->query_22.has_query28) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_28.data,
+ sizeof(f55->query_28.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* query 29 */
+ if (f55->query_28.has_query29)
+ offset += 1;
+
+ /* query 30 */
+ if (f55->query_28.has_query30) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_30.data,
+ sizeof(f55->query_30.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+ }
+
+ /* queries 31 32 */
+ if (f55->query_30.has_query31_query32)
+ offset += 2;
+
+ /* query 33 */
+ if (f55->query_30.has_query33) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->query_base_addr + offset,
+ f55->query_33.data,
+ sizeof(f55->query_33.data));
+ if (retval < 0)
+ return retval;
+ offset += 1;
+
+ f55->extended_amp = f55->query_33.has_extended_amp_pad;
+ }
+
+ return 0;
+}
+
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char rx_electrodes;
+ unsigned char tx_electrodes;
+ struct f55_control_43 ctrl_43;
+
+ retval = test_f55_set_queries();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 query registers\n",
+ __func__);
+ return;
+ }
+
+ if (!f55->query.has_sensor_assignment)
+ return;
+
+ retval = test_f55_set_controls();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set up F55 control registers\n",
+ __func__);
+ return;
+ }
+
+ tx_electrodes = f55->query.num_of_tx_electrodes;
+ rx_electrodes = f55->query.num_of_rx_electrodes;
+
+ f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+ f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+ f55->tx_assignment,
+ tx_electrodes);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 tx assignment\n",
+ __func__);
+ return;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+ f55->rx_assignment,
+ rx_electrodes);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 rx assignment\n",
+ __func__);
+ return;
+ }
+
+ f54->tx_assigned = 0;
+ for (ii = 0; ii < tx_electrodes; ii++) {
+ if (f55->tx_assignment[ii] != 0xff)
+ f54->tx_assigned++;
+ }
+
+ f54->rx_assigned = 0;
+ for (ii = 0; ii < rx_electrodes; ii++) {
+ if (f55->rx_assignment[ii] != 0xff)
+ f54->rx_assigned++;
+ }
+
+ if (f55->amp_sensor) {
+ f54->tx_assigned = f55->size_of_column2mux;
+ f54->rx_assigned /= 2;
+ }
+
+ if (f55->extended_amp) {
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->control_base_addr + f55->afe_mux_offset,
+ ctrl_43.data,
+ sizeof(ctrl_43.data));
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 AFE mux sizes\n",
+ __func__);
+ return;
+ }
+
+ f54->tx_assigned = ctrl_43.afe_l_mux_size +
+ ctrl_43.afe_r_mux_size;
+ }
+
+ /* force mapping */
+ if (f55->has_force) {
+ f55->force_tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+ f55->force_rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->control_base_addr + f55->force_tx_offset,
+ f55->force_tx_assignment,
+ tx_electrodes);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 force tx assignment\n",
+ __func__);
+ return;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f55->control_base_addr + f55->force_rx_offset,
+ f55->force_rx_assignment,
+ rx_electrodes);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F55 force rx assignment\n",
+ __func__);
+ return;
+ }
+
+ for (ii = 0; ii < tx_electrodes; ii++) {
+ if (f55->force_tx_assignment[ii] != 0xff)
+ f54->tx_assigned++;
+ }
+
+ for (ii = 0; ii < rx_electrodes; ii++) {
+ if (f55->force_rx_assignment[ii] != 0xff)
+ f54->rx_assigned++;
+ }
+ }
+
+ return;
+}
+
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned char page)
+{
+ f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+ if (!f55) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for F55\n",
+ __func__);
+ return;
+ }
+
+ f55->query_base_addr = fd->query_base_addr | (page << 8);
+ f55->control_base_addr = fd->ctrl_base_addr | (page << 8);
+ f55->data_base_addr = fd->data_base_addr | (page << 8);
+ f55->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+ return;
+}
+
+static void test_f21_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char size_of_query2;
+ unsigned char size_of_query5;
+ unsigned char query_11_offset;
+ unsigned char ctrl_4_offset;
+ struct f21_query_2 *query_2 = NULL;
+ struct f21_query_5 *query_5 = NULL;
+ struct f21_query_11 *query_11 = NULL;
+
+ query_2 = kzalloc(sizeof(*query_2), GFP_KERNEL);
+ if (!query_2) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for query_2\n",
+ __func__);
+ goto exit;
+ }
+
+ query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+ if (!query_5) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for query_5\n",
+ __func__);
+ goto exit;
+ }
+
+ query_11 = kzalloc(sizeof(*query_11), GFP_KERNEL);
+ if (!query_11) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for query_11\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->query_base_addr + 1,
+ &size_of_query2,
+ sizeof(size_of_query2));
+ if (retval < 0)
+ goto exit;
+
+ if (size_of_query2 > sizeof(query_2->data))
+ size_of_query2 = sizeof(query_2->data);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->query_base_addr + 2,
+ query_2->data,
+ size_of_query2);
+ if (retval < 0)
+ goto exit;
+
+ if (!query_2->query11_is_present) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: No F21 force capabilities\n",
+ __func__);
+ goto exit;
+ }
+
+ query_11_offset = query_2->query0_is_present +
+ query_2->query1_is_present +
+ query_2->query2_is_present +
+ query_2->query3_is_present +
+ query_2->query4_is_present +
+ query_2->query5_is_present +
+ query_2->query6_is_present +
+ query_2->query7_is_present +
+ query_2->query8_is_present +
+ query_2->query9_is_present +
+ query_2->query10_is_present;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->query_base_addr + 11,
+ query_11->data,
+ sizeof(query_11->data));
+ if (retval < 0)
+ goto exit;
+
+ if (!query_11->has_force_sensing_txrx_mapping) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: No F21 force mapping\n",
+ __func__);
+ goto exit;
+ }
+
+ f21->max_num_of_tx = query_11->max_number_of_force_txs;
+ f21->max_num_of_rx = query_11->max_number_of_force_rxs;
+ f21->max_num_of_txrx = f21->max_num_of_tx + f21->max_num_of_rx;
+
+ f21->force_txrx_assignment = kzalloc(f21->max_num_of_txrx, GFP_KERNEL);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->query_base_addr + 4,
+ &size_of_query5,
+ sizeof(size_of_query5));
+ if (retval < 0)
+ goto exit;
+
+ if (size_of_query5 > sizeof(query_5->data))
+ size_of_query5 = sizeof(query_5->data);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->query_base_addr + 5,
+ query_5->data,
+ size_of_query5);
+ if (retval < 0)
+ goto exit;
+
+ ctrl_4_offset = query_5->ctrl0_is_present +
+ query_5->ctrl1_is_present +
+ query_5->ctrl2_is_present +
+ query_5->ctrl3_is_present;
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ f21->control_base_addr + ctrl_4_offset,
+ f21->force_txrx_assignment,
+ f21->max_num_of_txrx);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F21 force txrx assignment\n",
+ __func__);
+ goto exit;
+ }
+
+ f21->has_force = true;
+
+ for (ii = 0; ii < f21->max_num_of_tx; ii++) {
+ if (f21->force_txrx_assignment[ii] != 0xff)
+ f21->tx_assigned++;
+ }
+
+ for (ii = f21->max_num_of_tx; ii < f21->max_num_of_txrx; ii++) {
+ if (f21->force_txrx_assignment[ii] != 0xff)
+ f21->rx_assigned++;
+ }
+
+exit:
+ kfree(query_2);
+ kfree(query_5);
+ kfree(query_11);
+
+ return;
+}
+
+static void test_f21_set_regs(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned char page)
+{
+ f21 = kzalloc(sizeof(*f21), GFP_KERNEL);
+ if (!f21) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for F21\n",
+ __func__);
+ return;
+ }
+
+ f21->query_base_addr = fd->query_base_addr | (page << 8);
+ f21->control_base_addr = fd->ctrl_base_addr | (page << 8);
+ f21->data_base_addr = fd->data_base_addr | (page << 8);
+ f21->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+ return;
+}
+
+static int test_scan_pdt(void)
+{
+ int retval;
+ unsigned char intr_count = 0;
+ unsigned char page;
+ unsigned short addr;
+ bool f54found = false;
+ bool f55found = false;
+ struct synaptics_rmi4_fn_desc rmi_fd;
+ struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+ for (page = 0; page < PAGES_TO_SERVICE; page++) {
+ for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+ addr |= (page << 8);
+
+ retval = synaptics_rmi4_reg_read(rmi4_data,
+ addr,
+ (unsigned char *)&rmi_fd,
+ sizeof(rmi_fd));
+ if (retval < 0)
+ return retval;
+
+ addr &= ~(MASK_8BIT << 8);
+
+ if (!rmi_fd.fn_number)
+ break;
+
+ switch (rmi_fd.fn_number) {
+ case SYNAPTICS_RMI4_F54:
+ test_f54_set_regs(rmi4_data,
+ &rmi_fd, intr_count, page);
+ f54found = true;
+ break;
+ case SYNAPTICS_RMI4_F55:
+ test_f55_set_regs(rmi4_data,
+ &rmi_fd, page);
+ f55found = true;
+ break;
+ case SYNAPTICS_RMI4_F21:
+ test_f21_set_regs(rmi4_data,
+ &rmi_fd, page);
+ break;
+ default:
+ break;
+ }
+
+ if (f54found && f55found)
+ goto pdt_done;
+
+ intr_count += rmi_fd.intr_src_count;
+ }
+ }
+
+ if (!f54found) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to find F54\n",
+ __func__);
+ return -EINVAL;
+ }
+
+pdt_done:
+ return 0;
+}
+
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask)
+{
+ if (!f54)
+ return;
+
+ if (f54->intr_mask & intr_mask)
+ queue_work(f54->test_report_workqueue, &f54->test_report_work);
+
+ return;
+}
+
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+
+ if (f54) {
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "%s: Handle already exists\n",
+ __func__);
+ return 0;
+ }
+
+ f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+ if (!f54) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to alloc mem for F54\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ f54->rmi4_data = rmi4_data;
+
+ f55 = NULL;
+
+ f21 = NULL;
+
+ retval = test_scan_pdt();
+ if (retval < 0)
+ goto exit_free_mem;
+
+ retval = test_set_queries();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F54 query registers\n",
+ __func__);
+ goto exit_free_mem;
+ }
+
+ f54->tx_assigned = f54->query.num_of_tx_electrodes;
+ f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+ retval = test_set_controls();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set up F54 control registers\n",
+ __func__);
+ goto exit_free_control;
+ }
+
+ test_set_data();
+
+ if (f55)
+ test_f55_init(rmi4_data);
+
+ if (f21)
+ test_f21_init(rmi4_data);
+
+ if (rmi4_data->external_afe_buttons)
+ f54->tx_assigned++;
+
+ retval = test_set_sysfs();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to create sysfs entries\n",
+ __func__);
+ goto exit_sysfs;
+ }
+
+ f54->test_report_workqueue =
+ create_singlethread_workqueue("test_report_workqueue");
+ INIT_WORK(&f54->test_report_work, test_report_work);
+
+ hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ f54->watchdog.function = test_get_report_timeout;
+ INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+ mutex_init(&f54->status_mutex);
+ f54->status = STATUS_IDLE;
+
+ return 0;
+
+exit_sysfs:
+ if (f21)
+ kfree(f21->force_txrx_assignment);
+
+ if (f55) {
+ kfree(f55->tx_assignment);
+ kfree(f55->rx_assignment);
+ kfree(f55->force_tx_assignment);
+ kfree(f55->force_rx_assignment);
+ }
+
+exit_free_control:
+ test_free_control_mem();
+
+exit_free_mem:
+ kfree(f21);
+ f21 = NULL;
+ kfree(f55);
+ f55 = NULL;
+ kfree(f54);
+ f54 = NULL;
+
+exit:
+ return retval;
+}
+
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+ if (!f54)
+ goto exit;
+
+ hrtimer_cancel(&f54->watchdog);
+
+ cancel_work_sync(&f54->test_report_work);
+ flush_workqueue(f54->test_report_workqueue);
+ destroy_workqueue(f54->test_report_workqueue);
+
+ test_remove_sysfs();
+
+ if (f21)
+ kfree(f21->force_txrx_assignment);
+
+ if (f55) {
+ kfree(f55->tx_assignment);
+ kfree(f55->rx_assignment);
+ kfree(f55->force_tx_assignment);
+ kfree(f55->force_rx_assignment);
+ }
+
+ test_free_control_mem();
+
+ if (f54->data_buffer_size)
+ kfree(f54->report_data);
+
+ kfree(f21);
+ f21 = NULL;
+
+ kfree(f55);
+ f55 = NULL;
+
+ kfree(f54);
+ f54 = NULL;
+
+exit:
+ complete(&test_remove_complete);
+
+ return;
+}
+
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+
+ if (!f54) {
+ synaptics_rmi4_test_init(rmi4_data);
+ return;
+ }
+
+ if (f21)
+ kfree(f21->force_txrx_assignment);
+
+ if (f55) {
+ kfree(f55->tx_assignment);
+ kfree(f55->rx_assignment);
+ kfree(f55->force_tx_assignment);
+ kfree(f55->force_rx_assignment);
+ }
+
+ test_free_control_mem();
+
+ kfree(f55);
+ f55 = NULL;
+
+ kfree(f21);
+ f21 = NULL;
+
+ retval = test_scan_pdt();
+ if (retval < 0)
+ goto exit_free_mem;
+
+ retval = test_set_queries();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to read F54 query registers\n",
+ __func__);
+ goto exit_free_mem;
+ }
+
+ f54->tx_assigned = f54->query.num_of_tx_electrodes;
+ f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+ retval = test_set_controls();
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Failed to set up F54 control registers\n",
+ __func__);
+ goto exit_free_control;
+ }
+
+ test_set_data();
+
+ if (f55)
+ test_f55_init(rmi4_data);
+
+ if (f21)
+ test_f21_init(rmi4_data);
+
+ if (rmi4_data->external_afe_buttons)
+ f54->tx_assigned++;
+
+ f54->status = STATUS_IDLE;
+
+ return;
+
+exit_free_control:
+ test_free_control_mem();
+
+exit_free_mem:
+ hrtimer_cancel(&f54->watchdog);
+
+ cancel_work_sync(&f54->test_report_work);
+ flush_workqueue(f54->test_report_workqueue);
+ destroy_workqueue(f54->test_report_workqueue);
+
+ test_remove_sysfs();
+
+ if (f54->data_buffer_size)
+ kfree(f54->report_data);
+
+ kfree(f21);
+ f21 = NULL;
+
+ kfree(f55);
+ f55 = NULL;
+
+ kfree(f54);
+ f54 = NULL;
+
+ return;
+}
+
+static struct synaptics_rmi4_exp_fn test_module = {
+ .fn_type = RMI_TEST_REPORTING,
+ .init = synaptics_rmi4_test_init,
+ .remove = synaptics_rmi4_test_remove,
+ .reset = synaptics_rmi4_test_reset,
+ .reinit = NULL,
+ .early_suspend = NULL,
+ .suspend = NULL,
+ .resume = NULL,
+ .late_resume = NULL,
+ .attn = synaptics_rmi4_test_attn,
+};
+
+static int __init rmi4_test_module_init(void)
+{
+ synaptics_rmi4_new_function(&test_module, true);
+
+ return 0;
+}
+
+static void __exit rmi4_test_module_exit(void)
+{
+ synaptics_rmi4_new_function(&test_module, false);
+
+ wait_for_completion(&test_remove_complete);
+
+ return;
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
new file mode 100644
index 0000000..b9ae0ac
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
@@ -0,0 +1,416 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+/*
+*#define RMI_DCS_SUSPEND_RESUME
+*/
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+/*
+ * Register image of the RMI F38 command block used to forward DCS
+ * commands to the display.  The bitfield view and the raw data[7]
+ * view alias the same 7 command-register bytes.
+ */
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			/* set to actually forward the command over DCS */
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+/*
+ * Per-device state for the video (F38/DCS) expansion module.
+ * param holds the single-shot parameter byte for the next DCS write;
+ * the *_base_addr fields cache the F38 register bases found in the PDT.
+ */
+struct synaptics_rmi4_video_handle {
+	unsigned char param;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+/* One DCS opcode plus the post-command settle delay in milliseconds */
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+/* Standard MIPI DCS suspend: display off (0x28), then sleep in (0x10) */
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+/* Standard MIPI DCS resume: sleep out (0x11), then display on (0x29) */
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+static struct device_attribute attrs[] = {
+ __ATTR(dcs_write, 0220,
+ synaptics_rmi4_show_error,
+ video_sysfs_dcs_write_store),
+ __ATTR(param, 0220,
+ synaptics_rmi4_show_error,
+ video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+/*
+ * sysfs "dcs_write" (write-only): parse a hex DCS opcode and forward it
+ * to the display through F38.  Returns count on success, -errno otherwise.
+ */
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	/*
+	 * kstrtouint() returns 0 on success and a negative errno on
+	 * failure -- it is not sscanf.  Comparing its result against 1
+	 * rejected every valid input, so the attribute never worked.
+	 */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * sysfs "param" (write-only): stash a hex parameter byte to be sent as
+ * parameter_field_1 of the next DCS command.
+ */
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/*
+	 * kstrtouint() returns 0 on success, not a conversion count;
+	 * the original "!= 1" check made every write fail with -EINVAL.
+	 */
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+/*
+ * Build an F38 command block for the given DCS opcode (consuming the
+ * previously stored single-shot parameter byte) and write it to the
+ * F38 command registers.  Returns 0 on success or a negative errno.
+ */
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct f38_command command;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	memset(&command, 0x00, sizeof(command));
+
+	command.command_opcode = command_opcode;
+	command.parameter_field_1 = video->param;
+	command.send_to_dcs = 1;
+
+	/* param is single-shot: clear it once it has been consumed */
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk the RMI Page Description Table on every page looking for F38
+ * and record its query/control/data/command base addresses (with the
+ * page number folded into bits 8..15).  Returns 0 on success, a
+ * negative errno if the read fails or F38 is absent.
+ */
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries are scanned downwards from PDT_START */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page-select bits for the next iteration */
+			addr &= ~(MASK_8BIT << 8);
+
+			/* fn_number == 0 marks the end of this page's PDT */
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	/* rmi_fd/page still hold the matching descriptor at this point */
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+/*
+ * Allocate the video handle, locate F38 in the PDT and expose the
+ * dcs_write/param sysfs attributes.  Returns 0 on success (including
+ * the benign "no F38 on this device" case) or a negative errno.
+ */
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		/* F38 absent: not a fatal error, just no video support */
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	/*
+	 * attr_count is unsigned, so the original
+	 * "for (attr_count--; attr_count >= 0; attr_count--)" loop could
+	 * never terminate (an unsigned value is always >= 0).  Count down
+	 * explicitly, removing only the attributes already created.
+	 */
+	while (attr_count > 0)
+		sysfs_remove_file(video->sysfs_dir,
+				&attrs[--attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the sysfs attributes and free the video handle, then signal
+ * module exit that removal is complete.  Safe to call when the handle
+ * was never created.
+ */
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char idx;
+
+	if (video) {
+		for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+			sysfs_remove_file(video->sysfs_dir, &attrs[idx].attr);
+
+		kobject_put(video->sysfs_dir);
+
+		kfree(video);
+		video = NULL;
+	}
+
+	complete(&video_remove_complete);
+}
+
+/*
+ * Reset hook: if the controller reset happened before init succeeded,
+ * retry creating the handle; otherwise nothing to do.
+ */
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (video)
+		return;
+
+	synaptics_rmi4_video_init(rmi4_data);
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+/*
+ * Play the DCS suspend sequence (display off, sleep in) with the
+ * configured settle delays.  Aborts silently on the first send failure.
+ */
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(suspend_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = suspend_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(suspend_sequence[ii].wait_time);
+	}
+
+	return;
+}
+
+/*
+ * Play the DCS resume sequence (sleep out, display on); mirror image
+ * of the suspend path above.
+ */
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(resume_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = resume_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(resume_sequence[ii].wait_time);
+	}
+
+	return;
+}
+#endif
+
+/*
+ * Expansion-function descriptor for the video module; suspend/resume
+ * hooks are only wired up when RMI_DCS_SUSPEND_RESUME is enabled.
+ */
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+static int __init rmi4_video_module_init(void)
+{
+	/* Register the video module with the DSX core (insert) */
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_video_module_exit(void)
+{
+	/* Ask the DSX core to remove the module ... */
+	synaptics_rmi4_new_function(&video_module, false);
+
+	/* ... and wait until its remove callback has actually finished */
+	wait_for_completion(&video_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 620fc50..ca267a8 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6324728..28ef920 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -55,6 +55,9 @@
#include <linux/remote_spinlock.h>
#include <linux/ktime.h>
#include <trace/events/iommu.h>
+#include <soc/qcom/msm_tz_smmu.h>
+#include <soc/qcom/scm.h>
+#include <linux/notifier.h>
#include <linux/amba/bus.h>
@@ -174,6 +177,7 @@
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
#define SMR_MASK_MASK 0x7FFF
+#define SID_MASK 0x7FFF
#define SMR_ID_SHIFT 0
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
@@ -411,6 +415,7 @@
void __iomem *base;
unsigned long size;
+ phys_addr_t phys_addr;
unsigned long pgshift;
#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
@@ -435,6 +440,8 @@
#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
+#define ARM_SMMU_OPT_STATIC_CB (1 << 8)
+#define ARM_SMMU_OPT_HALT (1 << 9)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -468,6 +475,7 @@
unsigned int num_impl_def_attach_registers;
struct arm_smmu_power_resources *pwr;
+ struct notifier_block regulator_nb;
spinlock_t atos_lock;
@@ -477,6 +485,8 @@
struct arm_smmu_arch_ops *arch_ops;
void *archdata;
+
+ enum tz_smmu_device_id sec_id;
};
enum arm_smmu_context_fmt {
@@ -528,6 +538,7 @@
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
u32 attributes;
+ bool slave_side_secure;
u32 secure_vmid;
struct list_head pte_info_list;
struct list_head unassign_list;
@@ -561,6 +572,8 @@
{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
+ { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
+ { ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
{ 0, NULL},
};
@@ -585,6 +598,10 @@
struct device *dev);
static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;
+static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain);
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -611,6 +628,22 @@
return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
}
+/*
+ * Ask TZ to restore the secure configuration of a static-CB SMMU
+ * (needed after its power rail was collapsed).  No-op when the SMMU
+ * is not using TZ-managed static context banks.  Returns 0 or -EINVAL.
+ */
+static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
+{
+	int ret;
+	int scm_ret = 0;
+
+	if (!arm_smmu_is_static_cb(smmu))
+		return 0;
+
+	ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+	/* either a transport failure or a TZ-side failure is fatal */
+	if (ret || scm_ret) {
+		pr_err("scm call IOMMU_SECURE_CFG failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
{
if (smmu_domain->attributes &
@@ -622,20 +655,37 @@
return false;
}
-static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
+static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu)
+{
+ return smmu->options & ARM_SMMU_OPT_STATIC_CB;
+}
+
+static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
{
return (smmu_domain->secure_vmid != VMID_INVAL);
}
+static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+ return arm_smmu_has_secure_vmid(smmu_domain) &&
+ smmu_domain->slave_side_secure;
+}
+
+static bool arm_smmu_is_master_side_secure(struct arm_smmu_domain *smmu_domain)
+{
+ return arm_smmu_has_secure_vmid(smmu_domain)
+ && !smmu_domain->slave_side_secure;
+}
+
static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
{
- if (arm_smmu_is_domain_secure(smmu_domain))
+ if (arm_smmu_is_master_side_secure(smmu_domain))
mutex_lock(&smmu_domain->assign_lock);
}
static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
{
- if (arm_smmu_is_domain_secure(smmu_domain))
+ if (arm_smmu_is_master_side_secure(smmu_domain))
mutex_unlock(&smmu_domain->assign_lock);
}
@@ -1272,7 +1322,7 @@
void *page;
struct arm_smmu_domain *smmu_domain = cookie;
- if (!arm_smmu_is_domain_secure(smmu_domain))
+ if (!arm_smmu_is_master_side_secure(smmu_domain))
return alloc_pages_exact(size, gfp_mask);
page = arm_smmu_secure_pool_remove(smmu_domain, size);
@@ -1295,7 +1345,7 @@
{
struct arm_smmu_domain *smmu_domain = cookie;
- if (!arm_smmu_is_domain_secure(smmu_domain)) {
+ if (!arm_smmu_is_master_side_secure(smmu_domain)) {
free_pages_exact(virt, size);
return;
}
@@ -1506,6 +1556,22 @@
return IRQ_HANDLED;
}
+/*
+ * For static-CB SMMUs with non-secure AArch64 contexts, tell TZ which
+ * page-table format the context bank uses so that slave-side secure
+ * mappings can be forced to V8L.  Returns 0 when no action is needed.
+ */
+static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
+				struct io_pgtable_cfg *pgtbl_cfg)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	int ret = 0;
+
+	if ((smmu->version > ARM_SMMU_V1) &&
+		(cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) &&
+		!arm_smmu_has_secure_vmid(smmu_domain) &&
+		arm_smmu_is_static_cb(smmu)) {
+		ret = msm_tz_set_cb_format(smmu->sec_id, cfg->cbndx);
+	}
+	return ret;
+}
+
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
@@ -1816,14 +1882,27 @@
cfg->irptndx = cfg->cbndx;
}
- smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
- .quirks = quirks,
- .pgsize_bitmap = smmu->pgsize_bitmap,
- .ias = ias,
- .oas = oas,
- .tlb = tlb,
- .iommu_dev = smmu->dev,
- };
+ if (arm_smmu_is_slave_side_secure(smmu_domain)) {
+ smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .quirks = quirks,
+ .pgsize_bitmap = smmu->pgsize_bitmap,
+ .arm_msm_secure_cfg = {
+ .sec_id = smmu->sec_id,
+ .cbndx = cfg->cbndx,
+ },
+ .iommu_dev = smmu->dev,
+ };
+ fmt = ARM_MSM_SECURE;
+ } else {
+ smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .quirks = quirks,
+ .pgsize_bitmap = smmu->pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = tlb,
+ .iommu_dev = smmu->dev,
+ };
+ }
smmu_domain->smmu = smmu;
smmu_domain->dev = dev;
@@ -1856,6 +1935,13 @@
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain,
&smmu_domain->pgtbl_cfg);
+ /* for slave side secure, we may have to force the pagetable
+ * format to V8L.
+ */
+ ret = arm_smmu_set_pt_format(smmu_domain,
+ &smmu_domain->pgtbl_cfg);
+ if (ret)
+ goto out_clear_smmu;
arm_smmu_arch_init_context_bank(smmu_domain, dev);
@@ -2164,6 +2250,8 @@
const struct iommu_gather_ops *tlb;
tlb = smmu_domain->pgtbl_cfg.tlb;
+ if (!tlb)
+ return;
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(fwspec, i, idx) {
@@ -2240,7 +2328,7 @@
int source_vmid = VMID_HLOS;
struct arm_smmu_pte_info *pte_info, *temp;
- if (!arm_smmu_is_domain_secure(smmu_domain))
+ if (!arm_smmu_is_master_side_secure(smmu_domain))
return ret;
list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
@@ -2267,7 +2355,7 @@
int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
struct arm_smmu_pte_info *pte_info, *temp;
- if (!arm_smmu_is_domain_secure(smmu_domain))
+ if (!arm_smmu_is_master_side_secure(smmu_domain))
return;
list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
@@ -2291,7 +2379,14 @@
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_pte_info *pte_info;
- BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+ if (smmu_domain->slave_side_secure ||
+ !arm_smmu_has_secure_vmid(smmu_domain)) {
+ if (smmu_domain->slave_side_secure)
+ WARN(1, "slave side secure is enforced\n");
+ else
+ WARN(1, "Invalid VMID is set !!\n");
+ return;
+ }
pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
if (!pte_info)
@@ -2307,7 +2402,14 @@
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_pte_info *pte_info;
- BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+ if (smmu_domain->slave_side_secure ||
+ !arm_smmu_has_secure_vmid(smmu_domain)) {
+ if (smmu_domain->slave_side_secure)
+ WARN(1, "slave side secure is enforced\n");
+ else
+ WARN(1, "Invalid VMID is set !!\n");
+ return -EINVAL;
+ }
pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
if (!pte_info)
@@ -3285,7 +3387,20 @@
reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
- writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+
+ if (arm_smmu_is_static_cb(smmu)) {
+ phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
+ smmu->phys_addr;
+
+ if (scm_io_write(impl_def1_base_phys +
+ IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
+ dev_err(smmu->dev,
+ "scm_io_write fail. SMMU might not be halted");
+ return -EINVAL;
+ }
+ } else {
+ writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+ }
return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
}
@@ -3307,7 +3422,18 @@
reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
- writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+
+ if (arm_smmu_is_static_cb(smmu)) {
+ phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
+ smmu->phys_addr;
+
+ if (scm_io_write(impl_def1_base_phys +
+ IMPL_DEF1_MICRO_MMU_CTRL, reg))
+ dev_err(smmu->dev,
+ "scm_io_write fail. SMMU might not be resumed");
+ } else {
+ writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+ }
}
static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
@@ -3540,7 +3666,10 @@
cb = smmu->s2crs[idx].cbndx;
}
- if (cb < 0) {
+ if (cb >= 0 && arm_smmu_is_static_cb(smmu))
+ smmu_domain->slave_side_secure = true;
+
+ if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
mutex_unlock(&smmu->stream_map_mutex);
return __arm_smmu_alloc_bitmap(smmu->context_map,
smmu->num_s2_context_banks,
@@ -3549,7 +3678,8 @@
for (i = 0; i < smmu->num_mapping_groups; i++) {
if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
- smmu->s2crs[i].cb_handoff = false;
+ if (!arm_smmu_is_static_cb(smmu))
+ smmu->s2crs[i].cb_handoff = false;
smmu->s2crs[i].count -= 1;
}
}
@@ -3691,6 +3821,71 @@
return 0;
}
+/*
+ * Regulator notifier: halt the SMMU just before its GDSC is disabled
+ * and restore/resume it right after the GDSC comes back up, so TZ
+ * state and the translation pipeline stay consistent across power
+ * collapse.  Clocks are enabled around the register accesses and
+ * released again on every path.  Always returns NOTIFY_OK.
+ */
+static int regulator_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	int ret = 0;
+	struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
+			regulator_nb);
+
+	if (event != REGULATOR_EVENT_PRE_DISABLE &&
+			event != REGULATOR_EVENT_ENABLE)
+		return NOTIFY_OK;
+
+	ret = arm_smmu_prepare_clocks(smmu->pwr);
+	if (ret)
+		goto out;
+
+	ret = arm_smmu_power_on_atomic(smmu->pwr);
+	if (ret)
+		goto unprepare_clock;
+
+	if (event == REGULATOR_EVENT_PRE_DISABLE)
+		qsmmuv2_halt(smmu);
+	else if (event == REGULATOR_EVENT_ENABLE) {
+		/* if the secure cfg cannot be restored, skip the resume */
+		if (arm_smmu_restore_sec_cfg(smmu))
+			goto power_off;
+		qsmmuv2_resume(smmu);
+	}
+	/* both event paths intentionally fall through to power down again */
+power_off:
+	arm_smmu_power_off_atomic(smmu->pwr);
+unprepare_clock:
+	arm_smmu_unprepare_clocks(smmu->pwr);
+out:
+	return NOTIFY_OK;
+}
+
+/*
+ * Hook regulator_notifier() onto the SMMU's first GDSC so halt/resume
+ * tracks power collapse.  Only active when the qcom,enable-smmu-halt
+ * option is set; silently succeeds when no regulators are described.
+ */
+static int register_regulator_notifier(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct regulator_bulk_data *consumers;
+	int ret = 0, num_consumers;
+	struct arm_smmu_power_resources *pwr = smmu->pwr;
+
+	if (!(smmu->options & ARM_SMMU_OPT_HALT))
+		goto out;
+
+	num_consumers = pwr->num_gdscs;
+	consumers = pwr->gdscs;
+
+	if (!num_consumers) {
+		dev_info(dev, "no regulator info exist for %s\n",
+				dev_name(dev));
+		goto out;
+	}
+
+	smmu->regulator_nb.notifier_call = regulator_notifier;
+	/* registering the notifier against one gdsc is sufficient as
+	 * we do enable/disable regulators in group.
+	 */
+	ret = regulator_register_notifier(consumers[0].consumer,
+			&smmu->regulator_nb);
+	if (ret)
+		dev_err(dev, "Regulator notifier request failed\n");
+out:
+	return ret;
+}
+
static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
{
const char *cname;
@@ -3801,6 +3996,9 @@
bool cttw_dt, cttw_reg;
int i;
+ if (arm_smmu_restore_sec_cfg(smmu))
+ return -ENODEV;
+
dev_dbg(smmu->dev, "probing hardware configuration...\n");
dev_dbg(smmu->dev, "SMMUv%d with:\n",
smmu->version == ARM_SMMU_V2 ? 2 : 1);
@@ -3878,26 +4076,32 @@
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
- for (i = 0; i < size; i++) {
+ if (!arm_smmu_is_static_cb(smmu)) {
+ for (i = 0; i < size; i++) {
+ smr = readl_relaxed(
+ gr0_base + ARM_SMMU_GR0_SMR(i));
+ if (!(smr & SMR_VALID))
+ break;
+ }
+ if (i == size) {
+ dev_err(smmu->dev,
+ "Unable to compute streamid_masks\n");
+ return -ENODEV;
+ }
+
+ smr = smmu->streamid_mask << SMR_ID_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
- if (!(smr & SMR_VALID))
- break;
- }
- if (i == size) {
- dev_err(smmu->dev,
- "Unable to compute streamid_masks\n");
- return -ENODEV;
- }
+ smmu->streamid_mask = smr >> SMR_ID_SHIFT;
- smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
- smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
- smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
- smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+ smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+ smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+ } else {
+ smmu->smr_mask_mask = SMR_MASK_MASK;
+ smmu->streamid_mask = SID_MASK;
+ }
/* Zero-initialised to mark as invalid */
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
@@ -4061,7 +4265,23 @@
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+#ifdef CONFIG_MSM_TZ_SMMU
+/*
+ * Initialize the TZ-managed secure page-table pool, but only if at
+ * least one enabled SMMU node in the device tree carries a
+ * qcom,tz-device-id property.  Returns -ENODEV when none is found.
+ */
+int register_iommu_sec_ptbl(void)
+{
+	struct device_node *np;
+
+	for_each_matching_node(np, arm_smmu_of_match)
+		if (of_find_property(np, "qcom,tz-device-id", NULL) &&
+				of_device_is_available(np))
+			break;
+	if (!np)
+		return -ENODEV;
+
+	of_node_put(np);
+
+	return msm_iommu_sec_pgtbl_init();
+}
+#endif
static int arm_smmu_of_iommu_configure_fixup(struct device *dev, void *data)
{
if (!dev->iommu_fwspec)
@@ -4115,6 +4335,8 @@
smmu->arch_ops = data->arch_ops;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ smmu->phys_addr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
@@ -4166,6 +4388,7 @@
if (err)
goto out_exit_power_resources;
+ smmu->sec_id = msm_dev_to_device_id(dev);
err = arm_smmu_device_cfg_probe(smmu);
if (err)
goto out_power_off;
@@ -4223,6 +4446,11 @@
else
bus_for_each_dev(&platform_bus_type, NULL, &arm_smmu_ops,
arm_smmu_add_device_fixup);
+
+ err = register_regulator_notifier(smmu);
+ if (err)
+ goto out_power_off;
+
#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype))
bus_set_iommu(&amba_bustype, &arm_smmu_ops);
@@ -4293,6 +4521,9 @@
return ret;
ret = platform_driver_register(&arm_smmu_driver);
+#ifdef CONFIG_MSM_TZ_SMMU
+ ret = register_iommu_sec_ptbl();
+#endif
registered = !ret;
trace_smmu_init(ktime_us_delta(ktime_get(), cur));
diff --git a/drivers/iommu/io-pgtable-msm-secure.c b/drivers/iommu/io-pgtable-msm-secure.c
new file mode 100644
index 0000000..983b28b
--- /dev/null
+++ b/drivers/iommu/io-pgtable-msm-secure.c
@@ -0,0 +1,350 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "io-pgtable-msm-secure: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "io-pgtable.h"
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+#define io_pgtable_to_data(x) \
+ container_of((x), struct msm_secure_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+struct msm_secure_io_pgtable {
+ struct io_pgtable iop;
+};
+
+/*
+ * Query TZ for the size of the secure IOMMU page-table pool, allocate
+ * the pool (DMA memory with no kernel mapping) and hand it to TZ via
+ * the IOMMU_SECURE_PTBL_INIT SCM call.  Returns 0 on success or a
+ * negative errno on failure.
+ */
+int msm_iommu_sec_pgtbl_init(void)
+{
+	int psize[2] = {0, 0};
+	unsigned int spare = 0;
+	int ret, ptbl_ret = 0;
+	struct device dev = {0};
+	void *cpu_addr;
+	dma_addr_t paddr;
+	unsigned long attrs = 0;
+
+	if (is_scm_armv8()) {
+		struct scm_desc desc = {0};
+
+		desc.args[0] = spare;
+		desc.arginfo = SCM_ARGS(1);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				IOMMU_SECURE_PTBL_SIZE), &desc);
+		psize[0] = desc.ret[0];
+		psize[1] = desc.ret[1];
+		if (ret || psize[1]) {
+			pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+			/*
+			 * psize[1] != 0 signals a TZ-side failure even when
+			 * ret == 0; returning ret here used to report success.
+			 */
+			return ret ? ret : -EINVAL;
+		}
+	}
+
+	/* Now allocate memory for the secure page tables */
+	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+	dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	arch_setup_dma_ops(&dev, 0, 0, NULL, 1);
+	cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
+	if (!cpu_addr) {
+		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+			__func__, psize[0]);
+		return -ENOMEM;
+	}
+
+	if (is_scm_armv8()) {
+		struct scm_desc desc = {0};
+
+		desc.args[0] = paddr;
+		desc.args[1] = psize[0];
+		desc.args[2] = 0;
+		desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				IOMMU_SECURE_PTBL_INIT), &desc);
+		ptbl_ret = desc.ret[0];
+
+		/*
+		 * NOTE(review): the pool is deliberately not freed on
+		 * failure -- TZ may already hold a reference to it; confirm
+		 * before adding a dma_free_attrs() here.
+		 */
+		if (ret) {
+			pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
+			return ret;
+		}
+
+		if (ptbl_ret) {
+			pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
+			/* was "return ret" with ret == 0, masking the failure */
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
+
+/*
+ * Map a single 1MB-aligned physical region at iova by asking TZ to do
+ * the page-table update (IOMMU_SECURE_MAP2_FLAT).  iova, paddr and size
+ * must all be 1MB aligned.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
+			phys_addr_t paddr, size_t size, int iommu_prot)
+{
+	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	void *flush_va, *flush_va_end;
+	struct scm_desc desc = {0};
+	int ret = -EINVAL;
+	u32 resp;
+
+	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
+		!IS_ALIGNED(size, SZ_1M))
+		return -EINVAL;
+
+	/* TZ reads the PA list indirectly: pass the address of paddr */
+	desc.args[0] = virt_to_phys(&paddr);
+	desc.args[1] = 1;
+	desc.args[2] = size;
+	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+	desc.args[5] = iova;
+	desc.args[6] = size;
+	desc.args[7] = 0;
+
+	flush_va = &paddr;
+	flush_va_end = (void *)
+		(((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+	/*
+	 * Ensure that the buffer is in RAM by the time it gets to TZ
+	 */
+	dmac_clean_range(flush_va, flush_va_end);
+
+	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+			SCM_VAL, SCM_VAL, SCM_VAL);
+
+	if (is_scm_armv8()) {
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
+				IOMMU_SECURE_MAP2_FLAT), &desc);
+		resp = desc.ret[0];
+	}
+
+	/*
+	 * resp is only read when the armv8 branch ran (otherwise ret is
+	 * still -EINVAL and the || short-circuits).
+	 */
+	if (ret || resp)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Resolve the bus address of a scatterlist entry.  The DMA address is
+ * preferred so carveout regions without a backing struct page can be
+ * mapped; fall back to the page's physical address otherwise.
+ */
+static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
+{
+	dma_addr_t addr;
+
+	addr = sg_dma_address(sg);
+	if (!addr)
+		addr = sg_phys(sg);
+
+	return addr;
+}
+
+/*
+ * Map a scatterlist at iova through TZ (IOMMU_SECURE_MAP2_FLAT).
+ * Every chunk must be 1MB aligned.  A physically contiguous list is
+ * passed to TZ as a single (address, length) pair; otherwise a flat
+ * array of 1MB chunk addresses is built and passed indirectly.
+ * Returns the mapped length on success or a negative errno.
+ */
+static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			struct scatterlist *sg, unsigned int nents,
+			int iommu_prot, size_t *size)
+{
+	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	int ret = -EINVAL;
+	struct scatterlist *tmp, *sgiter;
+	dma_addr_t *pa_list = 0;
+	unsigned int cnt, offset = 0, chunk_offset = 0;
+	dma_addr_t pa;
+	void *flush_va, *flush_va_end;
+	unsigned long len = 0;
+	struct scm_desc desc = {0};
+	int i;
+	u32 resp;
+
+	for_each_sg(sg, tmp, nents, i)
+		len += tmp->length;
+
+	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+		return -EINVAL;
+
+	if (sg->length == len) {
+		/* single contiguous entry: pass it to TZ directly */
+		cnt = 1;
+		pa = msm_secure_get_phys_addr(sg);
+		if (!IS_ALIGNED(pa, SZ_1M))
+			return -EINVAL;
+
+		desc.args[0] = virt_to_phys(&pa);
+		desc.args[1] = cnt;
+		desc.args[2] = len;
+		flush_va = &pa;
+	} else {
+		/* first pass: validate alignment and count 1MB chunks */
+		sgiter = sg;
+		if (!IS_ALIGNED(sgiter->length, SZ_1M))
+			return -EINVAL;
+		cnt = sg->length / SZ_1M;
+		while ((sgiter = sg_next(sgiter))) {
+			if (!IS_ALIGNED(sgiter->length, SZ_1M))
+				return -EINVAL;
+			cnt += sgiter->length / SZ_1M;
+		}
+
+		pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
+		if (!pa_list)
+			return -ENOMEM;
+
+		/* second pass: flatten the list into 1MB chunk addresses */
+		sgiter = sg;
+		cnt = 0;
+		pa = msm_secure_get_phys_addr(sgiter);
+		while (offset < len) {
+
+			if (!IS_ALIGNED(pa, SZ_1M)) {
+				kfree(pa_list);
+				return -EINVAL;
+			}
+
+			pa_list[cnt] = pa + chunk_offset;
+			chunk_offset += SZ_1M;
+			offset += SZ_1M;
+			cnt++;
+
+			/* advance to the next sg entry once this one is consumed */
+			if (chunk_offset >= sgiter->length && offset < len) {
+				chunk_offset = 0;
+				sgiter = sg_next(sgiter);
+				pa = msm_secure_get_phys_addr(sgiter);
+			}
+		}
+
+		desc.args[0] = virt_to_phys(pa_list);
+		desc.args[1] = cnt;
+		desc.args[2] = SZ_1M;
+		flush_va = pa_list;
+	}
+
+	desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+	desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+	desc.args[5] = iova;
+	desc.args[6] = len;
+	desc.args[7] = 0;
+
+	desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+			SCM_VAL, SCM_VAL, SCM_VAL);
+
+	/*
+	 * Ensure that the buffer is in RAM by the time it gets to TZ
+	 */
+
+	flush_va_end = (void *) (((unsigned long) flush_va) +
+			(cnt * sizeof(*pa_list)));
+	dmac_clean_range(flush_va, flush_va_end);
+
+	if (is_scm_armv8()) {
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
+				IOMMU_SECURE_MAP2_FLAT), &desc);
+		resp = desc.ret[0];
+
+		if (ret || resp)
+			ret = -EINVAL;
+		else
+			ret = len;
+	}
+
+	/* kfree(NULL) is a no-op for the contiguous path */
+	kfree(pa_list);
+	return ret;
+}
+
+/*
+ * Unmap a 1MB-aligned range via TZ (IOMMU_SECURE_UNMAP2_FLAT) with a
+ * TLB invalidate.  Returns the unmapped length on success; on failure
+ * the negative errno is returned through the size_t return type, which
+ * callers treat as "nothing unmapped".
+ */
+static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			size_t len)
+{
+	struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	int ret = -EINVAL;
+	struct scm_desc desc = {0};
+
+	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+		return ret;
+
+	desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
+	desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
+	desc.args[2] = iova;
+	desc.args[3] = len;
+	desc.args[4] = IOMMU_TLBINVAL_FLAG;
+	desc.arginfo = SCM_ARGS(5);
+
+	if (is_scm_armv8()) {
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP,
+				IOMMU_SECURE_UNMAP2_FLAT), &desc);
+
+		if (!ret)
+			ret = len;
+	}
+	return ret;
+}
+
+/*
+ * The kernel has no view into TZ-owned page tables, so iova-to-phys
+ * lookups are unsupported (-EINVAL cast into phys_addr_t).
+ */
+static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
+		unsigned long iova)
+{
+	return -EINVAL;
+}
+
+/*
+ * Allocate the pgtable container and install the TZ-backed ops.
+ * Returns NULL on allocation failure.
+ */
+static struct msm_secure_io_pgtable *
+msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
+{
+	struct msm_secure_io_pgtable *data;
+
+	/*
+	 * kzalloc (not kmalloc): only iop.ops is filled in here, so the
+	 * remaining io_pgtable fields must not carry stale heap garbage.
+	 */
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->iop.ops = (struct io_pgtable_ops) {
+		.map = msm_secure_map,
+		.map_sg = msm_secure_map_sg,
+		.unmap = msm_secure_unmap,
+		.iova_to_phys = msm_secure_iova_to_phys,
+	};
+
+	return data;
+}
+
+/*
+ * io_pgtable_init_fns alloc hook for the ARM_MSM_SECURE format.
+ * Returns the embedded io_pgtable, or NULL on allocation failure.
+ */
+static struct io_pgtable *
+msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+	struct msm_secure_io_pgtable *data =
+		msm_secure_alloc_pgtable_data(cfg);
+
+	/* taking &data->iop on a NULL data would be undefined behavior */
+	if (!data)
+		return NULL;
+
+	return &data->iop;
+}
+
+/*
+ * io_pgtable_init_fns free hook: release the container allocated in
+ * msm_secure_alloc_pgtable_data().
+ */
+static void msm_secure_free_pgtable(struct io_pgtable *iop)
+{
+	kfree(io_pgtable_to_data(iop));
+}
+
+/* Registered in io-pgtable.c under the ARM_MSM_SECURE format index */
+struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
+	.alloc = msm_secure_alloc_pgtable,
+	.free = msm_secure_free_pgtable,
+};
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 541abb2..0e30cfb 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -44,6 +44,9 @@
#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
[ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns,
#endif
+#ifdef CONFIG_MSM_TZ_SMMU
+ [ARM_MSM_SECURE] = &io_pgtable_arm_msm_secure_init_fns,
+#endif
};
static struct dentry *io_pgtable_top;
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index b35016e..2f5488c 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/scatterlist.h>
+#include <soc/qcom/msm_tz_smmu.h>
/*
* Public API for use by IOMMU drivers
@@ -14,6 +15,7 @@
ARM_64_LPAE_S2,
ARM_V7S,
ARM_V8L_FAST,
+ ARM_MSM_SECURE,
IO_PGTABLE_NUM_FMTS,
};
@@ -128,6 +130,11 @@
u64 mair[2];
void *pmds;
} av8l_fast_cfg;
+
+ struct {
+ enum tz_smmu_device_id sec_id;
+ int cbndx;
+ } arm_msm_secure_cfg;
};
};
@@ -211,6 +218,8 @@
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
+ if (!iop->cfg.tlb)
+ return;
iop->cfg.tlb->tlb_flush_all(iop->cookie);
iop->tlb_sync_pending = true;
}
@@ -218,12 +227,16 @@
static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
unsigned long iova, size_t size, size_t granule, bool leaf)
{
+ if (!iop->cfg.tlb)
+ return;
iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
iop->tlb_sync_pending = true;
}
static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
+ if (!iop->cfg.tlb)
+ return;
if (iop->tlb_sync_pending) {
iop->cfg.tlb->tlb_sync(iop->cookie);
iop->tlb_sync_pending = false;
@@ -248,6 +261,7 @@
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns;
/**
* io_pgtable_alloc_pages_exact:
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index e3cfffb..fca1909 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -1726,27 +1726,27 @@
rc);
return rc;
}
- period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
- if (period_us > INT_MAX / NSEC_PER_USEC) {
- duty_us = (period_us * led->cdev.brightness) /
- LED_FULL;
- rc = pwm_config_us(
- led->rgb_cfg->pwm_cfg->pwm_dev,
- duty_us,
- period_us);
- } else {
- duty_ns = ((period_us * NSEC_PER_USEC) /
- LED_FULL) * led->cdev.brightness;
- rc = pwm_config(
- led->rgb_cfg->pwm_cfg->pwm_dev,
- duty_ns,
- period_us * NSEC_PER_USEC);
- }
- if (rc < 0) {
- dev_err(&led->pdev->dev,
- "pwm config failed\n");
- return rc;
- }
+ }
+ period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ LED_FULL;
+ rc = pwm_config_us(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
+ duty_us,
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ LED_FULL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "pwm config failed\n");
+ return rc;
}
rc = qpnp_led_masked_write(led,
RGB_LED_EN_CTL(led->base),
diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c
index 160b858..00ebed9 100644
--- a/drivers/mailbox/qcom-rpmh-mailbox.c
+++ b/drivers/mailbox/qcom-rpmh-mailbox.c
@@ -126,6 +126,7 @@
struct rsc_drv {
struct mbox_controller mbox;
const char *name;
+ unsigned long addr;
void __iomem *base; /* start address of the RSC's registers */
void __iomem *reg_base; /* start address for DRV specific register */
int drv_id;
@@ -148,7 +149,7 @@
/* Log to IPC and Ftrace */
#define log_send_msg(drv, m, n, i, a, d, c, t) do { \
- trace_rpmh_send_msg(drv->name, m, n, i, a, d, c, t); \
+ trace_rpmh_send_msg(drv->name, drv->addr, m, n, i, a, d, c, t); \
ipc_log_string(drv->ipc_log_ctx, \
"send msg: m=%d n=%d msgid=0x%x addr=0x%x data=0x%x cmpl=%d trigger=%d", \
m, n, i, a, d, c, t); \
@@ -1125,6 +1126,7 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
+ drv->addr = res->start;
drv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(drv->base))
return PTR_ERR(drv->base);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 84402e4..7e6d999 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -254,6 +254,35 @@
return rc;
}
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid flush device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
+ ctx, cmd);
+ } else {
+ CAM_ERR(CAM_CORE, "No flush device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
int cam_context_handle_config_dev(struct cam_context *ctx,
struct cam_config_dev_cmd *cmd)
{
@@ -409,8 +438,10 @@
void cam_context_putref(struct cam_context *ctx)
{
kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
- CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
- ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+ CAM_DBG(CAM_CORE,
+ "ctx device hdl %ld, ref count %d, dev_name %s",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+ ctx->dev_name);
}
void cam_context_getref(struct cam_context *ctx)
@@ -419,6 +450,8 @@
/* should never happen */
WARN(1, "cam_context_getref fail\n");
}
- CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
- ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+ CAM_DBG(CAM_CORE,
+ "ctx device hdl %ld, ref count %d, dev_name %s",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+ ctx->dev_name);
}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 6d1589e..c823b7a 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -84,6 +84,7 @@
* @config_dev: Function pointer for config device
* @start_dev: Function pointer for start device
* @stop_dev: Function pointer for stop device
+ * @flush_dev: Function pointer for flush device
*
*/
struct cam_ctx_ioctl_ops {
@@ -97,6 +98,8 @@
struct cam_start_stop_dev_cmd *cmd);
int (*stop_dev)(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd);
+ int (*flush_dev)(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
};
/**
@@ -306,6 +309,18 @@
struct cam_config_dev_cmd *cmd);
/**
+ * cam_context_handle_flush_dev()
+ *
+ * @brief: Handle flush device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Flush device command payload
+ *
+ */
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+
+/**
* cam_context_handle_start_dev()
*
* @brief: Handle start device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 6b872b9..aab1a1a 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -146,6 +146,7 @@
{
struct cam_ctx_request *req = data;
struct cam_context *ctx = NULL;
+ struct cam_flush_dev_cmd flush_cmd;
struct cam_req_mgr_apply_request apply;
int rc;
@@ -169,14 +170,22 @@
* in a critical section which is provided by this
* mutex.
*/
+ if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
+ CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
+ flush_cmd.req_id = req->request_id;
+ cam_context_flush_req_to_hw(ctx, &flush_cmd);
+ cam_context_putref(ctx);
+ return;
+ }
+
mutex_lock(&ctx->sync_mutex);
if (!req->flushed) {
cam_context_apply_req_to_hw(req, &apply);
mutex_unlock(&ctx->sync_mutex);
} else {
- mutex_unlock(&ctx->sync_mutex);
- req->ctx = NULL;
req->flushed = 0;
+ req->ctx = NULL;
+ mutex_unlock(&ctx->sync_mutex);
spin_lock(&ctx->lock);
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
@@ -413,6 +422,174 @@
return rc;
}
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
+{
+ struct cam_hw_flush_args flush_args;
+ struct list_head temp_list;
+ struct cam_ctx_request *req;
+ uint32_t i;
+ int rc = 0;
+
+ /*
+ * flush pending requests, take the sync lock to synchronize with the
+ * sync callback thread so that the sync cb thread does not try to
+ * submit request to h/w while the request is being flushed
+ */
+ mutex_lock(&ctx->sync_mutex);
+ INIT_LIST_HEAD(&temp_list);
+ spin_lock(&ctx->lock);
+ list_splice_init(&ctx->pending_req_list, &temp_list);
+ spin_unlock(&ctx->lock);
+ flush_args.num_req_pending = 0;
+ while (!list_empty(&temp_list)) {
+ req = list_first_entry(&temp_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ req->flushed = 1;
+ flush_args.flush_req_pending[flush_args.num_req_pending++] =
+ req->req_priv;
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id != -1)
+ cam_sync_signal(req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ mutex_unlock(&ctx->sync_mutex);
+
+ if (ctx->hw_mgr_intf->hw_flush) {
+ flush_args.num_req_active = 0;
+ spin_lock(&ctx->lock);
+ INIT_LIST_HEAD(&temp_list);
+ list_splice_init(&ctx->active_req_list, &temp_list);
+ list_for_each_entry(req, &temp_list, list) {
+ flush_args.flush_req_active[flush_args.num_req_active++]
+ = req->req_priv;
+ }
+ spin_unlock(&ctx->lock);
+
+ if (flush_args.num_req_pending || flush_args.num_req_active) {
+ flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
+ ctx->hw_mgr_intf->hw_flush(
+ ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+ }
+ }
+
+ while (!list_empty(&temp_list)) {
+ req = list_first_entry(&temp_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id != -1) {
+ cam_sync_signal(req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+
+ spin_lock(&ctx->lock);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ spin_unlock(&ctx->lock);
+ req->ctx = NULL;
+ }
+ INIT_LIST_HEAD(&ctx->active_req_list);
+
+ return rc;
+}
+
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ struct cam_ctx_request *req = NULL;
+ struct cam_hw_flush_args flush_args;
+ uint32_t i;
+ int rc = 0;
+
+ flush_args.num_req_pending = 0;
+ flush_args.num_req_active = 0;
+ mutex_lock(&ctx->sync_mutex);
+ spin_lock(&ctx->lock);
+ list_for_each_entry(req, &ctx->pending_req_list, list) {
+ if (req->request_id != cmd->req_id)
+ continue;
+
+ req->flushed = 1;
+ flush_args.flush_req_pending[flush_args.num_req_pending++] =
+ req->req_priv;
+ break;
+ }
+ spin_unlock(&ctx->lock);
+ mutex_unlock(&ctx->sync_mutex);
+
+ if (ctx->hw_mgr_intf->hw_flush) {
+ if (!flush_args.num_req_pending) {
+ spin_lock(&ctx->lock);
+ list_for_each_entry(req, &ctx->active_req_list, list) {
+ if (req->request_id != cmd->req_id)
+ continue;
+
+ flush_args.flush_req_active[
+ flush_args.num_req_active++] =
+ req->req_priv;
+ break;
+ }
+ spin_unlock(&ctx->lock);
+ }
+
+ if (flush_args.num_req_pending || flush_args.num_req_active) {
+ flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
+ ctx->hw_mgr_intf->hw_flush(
+ ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+ }
+ }
+
+ if (req) {
+ if (flush_args.num_req_pending || flush_args.num_req_active) {
+ list_del_init(&req->list);
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id != -1)
+ cam_sync_signal(
+ req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ spin_lock(&ctx->lock);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ spin_unlock(&ctx->lock);
+ req->ctx = NULL;
+ }
+ }
+
+ return rc;
+}
+
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+
+ int rc = 0;
+
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
+ rc = cam_context_flush_ctx_to_hw(ctx);
+ else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ)
+ rc = cam_context_flush_req_to_hw(ctx, cmd);
+ else {
+ rc = -EINVAL;
+ CAM_ERR(CAM_CTXT, "Invalid flush type %d", cmd->flush_type);
+ }
+
+end:
+ return rc;
+}
+
int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
@@ -457,10 +634,7 @@
int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
{
int rc = 0;
- uint32_t i;
struct cam_hw_stop_args stop;
- struct cam_ctx_request *req;
- struct list_head temp_list;
if (!ctx) {
CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -478,27 +652,11 @@
if (rc)
goto end;
- /*
- * flush pending requests, take the sync lock to synchronize with the
- * sync callback thread so that the sync cb thread does not try to
- * submit request to h/w while the request is being flushed
- */
- mutex_lock(&ctx->sync_mutex);
- INIT_LIST_HEAD(&temp_list);
- spin_lock(&ctx->lock);
- list_splice_init(&ctx->pending_req_list, &temp_list);
- spin_unlock(&ctx->lock);
- while (!list_empty(&temp_list)) {
- req = list_first_entry(&temp_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- req->flushed = 1;
- for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (ctx->ctxt_to_hw_map) {
+ rc = cam_context_flush_ctx_to_hw(ctx);
+ if (rc)
+ goto end;
}
- mutex_unlock(&ctx->sync_mutex);
/* stop hw first */
if (ctx->hw_mgr_intf->hw_stop) {
@@ -507,36 +665,6 @@
&stop);
}
- /*
- * flush active queue, at this point h/w layer below does not have any
- * reference to requests in active queue.
- */
- INIT_LIST_HEAD(&temp_list);
- spin_lock(&ctx->lock);
- list_splice_init(&ctx->active_req_list, &temp_list);
- spin_unlock(&ctx->lock);
-
- while (!list_empty(&temp_list)) {
- req = list_first_entry(&temp_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- /*
- * The spin lock should be taken here to guard the free list,
- * as sync cb thread could be adding a pending req to free list
- */
- spin_lock(&ctx->lock);
- list_add_tail(&req->list, &ctx->free_req_list);
- req->ctx = NULL;
- spin_unlock(&ctx->lock);
- }
-
end:
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index 45d9e56..9b95ead 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -26,5 +26,10 @@
int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd);
int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
#endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
index 3a997ae..bd2b789 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -34,6 +34,7 @@
* @read: Function pointer for read hardware registers
* @write: Function pointer for Write hardware registers
* @process_cmd: Function pointer for additional hardware controls
+ * @flush_cmd: Function pointer for flush requests
*
*/
struct cam_hw_ops {
@@ -59,6 +60,8 @@
void *write_args, uint32_t arg_size);
int (*process_cmd)(void *hw_priv,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+ int (*flush)(void *hw_priv,
+ void *flush_args, uint32_t arg_size);
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index 4746152..a90b3d9 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -182,6 +182,26 @@
};
/**
+ * struct cam_hw_flush_args - Flush arguments
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @num_req_pending: Num request to flush, valid when flush type is REQ
+ * @flush_req_pending: Request pending pointers to flush
+ * @num_req_active: Num request to flush, valid when flush type is REQ
+ * @flush_req_active: Request active pointers to flush
+ * @flush_type: The flush type
+ *
+ */
+struct cam_hw_flush_args {
+ void *ctxt_to_hw_map;
+ uint32_t num_req_pending;
+ void *flush_req_pending[20];
+ uint32_t num_req_active;
+ void *flush_req_active[20];
+ enum flush_type_t flush_type;
+};
+
+/**
* cam_hw_mgr_intf - HW manager interface
*
* @hw_mgr_priv: HW manager object
@@ -205,6 +225,7 @@
* hardware manager
* @hw_open: Function pointer for HW init
* @hw_close: Function pointer for HW deinit
+ * @hw_flush: Function pointer for HW flush
*
*/
struct cam_hw_mgr_intf {
@@ -222,6 +243,7 @@
int (*hw_cmd)(void *hw_priv, void *write_args);
int (*hw_open)(void *hw_priv, void *fw_download_args);
int (*hw_close)(void *hw_priv, void *hw_close_args);
+ int (*hw_flush)(void *hw_priv, void *hw_flush_args);
};
#endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 1f0213e..a5977b3 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -192,6 +192,39 @@
return rc;
}
+static int __cam_node_handle_flush_dev(struct cam_node *node,
+ struct cam_flush_dev_cmd *flush)
+{
+ struct cam_context *ctx = NULL;
+ int rc;
+
+ if (!flush)
+ return -EINVAL;
+
+ if (flush->dev_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
+ return -EINVAL;
+ }
+
+ if (flush->session_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ flush->dev_handle);
+ return -EINVAL;
+ }
+
+ rc = cam_context_handle_flush_dev(ctx, flush);
+ if (rc)
+ CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+
+ return rc;
+}
+
static int __cam_node_handle_release_dev(struct cam_node *node,
struct cam_release_dev_cmd *release)
{
@@ -491,6 +524,20 @@
}
break;
}
+ case CAM_FLUSH_REQ: {
+ struct cam_flush_dev_cmd flush;
+
+ if (copy_from_user(&flush, (void __user *)cmd->handle,
+ sizeof(flush)))
+ rc = -EFAULT;
+ else {
+ rc = __cam_node_handle_flush_dev(node, &flush);
+ if (rc)
+ CAM_ERR(CAM_CORE,
+ "flush device failed(rc = %d)", rc);
+ }
+ break;
+ }
default:
CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 78c1dd3..04d65dd 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -124,6 +124,17 @@
return rc;
}
+static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_FD, "Failed to flush device, rc=%d", rc);
+
+ return rc;
+}
static int __cam_fd_ctx_config_dev_in_activated(
struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
@@ -191,6 +202,7 @@
.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
.release_dev = __cam_fd_ctx_release_dev_in_activated,
.config_dev = __cam_fd_ctx_config_dev_in_activated,
+ .flush_dev = __cam_fd_ctx_flush_dev_in_activated,
},
.crm_ops = {},
.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index e57066d..a15ccdc 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -207,7 +207,7 @@
return -EINVAL;
}
- CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+ CAM_DBG(CAM_FD, "ctx_index=%u, hw_ctx=%d", hw_ctx->ctx_index,
hw_ctx->device_index);
*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
@@ -335,7 +335,7 @@
/* Update required info in hw context */
hw_ctx->device_index = i;
- CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
+ CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
hw_ctx->device_index);
return 0;
@@ -1239,7 +1239,7 @@
return -EPERM;
}
- CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
+ CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
hw_ctx->device_index);
rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1266,26 +1266,125 @@
return rc;
}
-static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
- struct cam_fd_hw_mgr_ctx *hw_ctx)
+static int cam_fd_mgr_hw_flush_req(void *hw_mgr_priv,
+ struct cam_hw_flush_args *flush_args)
{
int rc = 0;
- struct cam_fd_mgr_frame_request *frame_req, *req_temp;
- struct cam_fd_hw_stop_args hw_stop_args;
+ struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
struct cam_fd_device *hw_device;
+ struct cam_fd_hw_stop_args hw_stop_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ uint32_t i = 0;
- if (!hw_mgr_priv || !hw_ctx) {
- CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
- hw_mgr_priv, hw_ctx);
- return -EINVAL;
- }
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
- if (!hw_ctx->ctx_in_use) {
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
return -EPERM;
}
- CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+ CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+ hw_ctx->device_index);
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ mutex_lock(&hw_mgr->frame_req_mutex);
+ for (i = 0; i < flush_args->num_req_active; i++) {
+ flush_req = (struct cam_fd_mgr_frame_request *)
+ flush_args->flush_req_active[i];
+
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_pending_list_high, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ if (frame_req->request_id != flush_req->request_id)
+ continue;
+
+ list_del_init(&frame_req->list);
+ break;
+ }
+
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_pending_list_normal, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ if (frame_req->request_id != flush_req->request_id)
+ continue;
+
+ list_del_init(&frame_req->list);
+ break;
+ }
+
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_processing_list, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ if (frame_req->request_id != flush_req->request_id)
+ continue;
+
+ list_del_init(&frame_req->list);
+
+ mutex_lock(&hw_device->lock);
+ if ((hw_device->ready_to_process == true) ||
+ (hw_device->cur_hw_ctx != hw_ctx))
+ goto unlock_dev_flush_req;
+
+ if (hw_device->hw_intf->hw_ops.stop) {
+ hw_stop_args.hw_ctx = hw_ctx;
+ rc = hw_device->hw_intf->hw_ops.stop(
+ hw_device->hw_intf->hw_priv,
+ &hw_stop_args,
+ sizeof(hw_stop_args));
+ if (rc) {
+ CAM_ERR(CAM_FD,
+ "Failed in HW Stop %d", rc);
+ goto unlock_dev_flush_req;
+ }
+ hw_device->ready_to_process = true;
+ }
+
+unlock_dev_flush_req:
+ mutex_unlock(&hw_device->lock);
+ break;
+ }
+ }
+ mutex_unlock(&hw_mgr->frame_req_mutex);
+
+ for (i = 0; i < flush_args->num_req_pending; i++) {
+ flush_req = (struct cam_fd_mgr_frame_request *)
+ flush_args->flush_req_pending[i];
+ cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &flush_req);
+ }
+
+ return rc;
+}
+
+static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv,
+ struct cam_hw_flush_args *flush_args)
+{
+ int rc = 0;
+ struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_fd_device *hw_device;
+ struct cam_fd_hw_stop_args hw_stop_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ uint32_t i = 0;
+
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+ CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
hw_ctx->device_index);
rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1317,28 +1416,64 @@
continue;
list_del_init(&frame_req->list);
+ mutex_lock(&hw_device->lock);
+ if ((hw_device->ready_to_process == true) ||
+ (hw_device->cur_hw_ctx != hw_ctx))
+ goto unlock_dev_flush_ctx;
+
+ if (hw_device->hw_intf->hw_ops.stop) {
+ hw_stop_args.hw_ctx = hw_ctx;
+ rc = hw_device->hw_intf->hw_ops.stop(
+ hw_device->hw_intf->hw_priv, &hw_stop_args,
+ sizeof(hw_stop_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+ goto unlock_dev_flush_ctx;
+ }
+ hw_device->ready_to_process = true;
+ }
+
+unlock_dev_flush_ctx:
+ mutex_unlock(&hw_device->lock);
}
mutex_unlock(&hw_mgr->frame_req_mutex);
- mutex_lock(&hw_device->lock);
- if ((hw_device->ready_to_process == true) ||
- (hw_device->cur_hw_ctx != hw_ctx))
- goto end;
-
- if (hw_device->hw_intf->hw_ops.stop) {
- hw_stop_args.hw_ctx = hw_ctx;
- rc = hw_device->hw_intf->hw_ops.stop(
- hw_device->hw_intf->hw_priv, &hw_stop_args,
- sizeof(hw_stop_args));
- if (rc) {
- CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
- goto end;
- }
- hw_device->ready_to_process = true;
+ for (i = 0; i < flush_args->num_req_pending; i++) {
+ flush_req = (struct cam_fd_mgr_frame_request *)
+ flush_args->flush_req_pending[i];
+ cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &flush_req);
}
-end:
- mutex_unlock(&hw_device->lock);
+ return rc;
+}
+
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+ void *hw_flush_args)
+{
+ int rc = 0;
+ struct cam_hw_flush_args *flush_args =
+ (struct cam_hw_flush_args *)hw_flush_args;
+
+ if (!hw_mgr_priv || !hw_flush_args) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+ hw_mgr_priv, hw_flush_args);
+ return -EINVAL;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_REQ:
+ rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args);
+ break;
+ case CAM_FLUSH_TYPE_ALL:
+ rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args);
+ break;
+ default:
+ rc = -EINVAL;
+ CAM_ERR(CAM_FD, "Invalid flush type %d",
+ flush_args->flush_type);
+ break;
+ }
return rc;
}
@@ -1363,7 +1498,7 @@
CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
return -EPERM;
}
- CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+ CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
hw_ctx->device_index);
rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1375,10 +1510,6 @@
CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
hw_device->ready_to_process);
- rc = cam_fd_mgr_hw_flush(hw_mgr, hw_ctx);
- if (rc)
- CAM_ERR(CAM_FD, "FD failed to flush");
-
if (hw_device->hw_intf->hw_ops.deinit) {
hw_deinit_args.hw_ctx = hw_ctx;
hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
@@ -1791,6 +1922,7 @@
hw_mgr_intf->hw_read = NULL;
hw_mgr_intf->hw_write = NULL;
hw_mgr_intf->hw_close = NULL;
+ hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 0c37994..d47350c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -70,6 +70,18 @@
return rc;
}
+static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to flush device");
+
+ return rc;
+}
+
static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
struct cam_config_dev_cmd *cmd)
{
@@ -140,6 +152,7 @@
.release_dev = __cam_icp_release_dev_in_acquired,
.start_dev = __cam_icp_start_dev_in_acquired,
.config_dev = __cam_icp_config_dev_in_ready,
+ .flush_dev = __cam_icp_flush_dev_in_ready,
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
@@ -150,6 +163,7 @@
.stop_dev = __cam_icp_stop_dev_in_ready,
.release_dev = __cam_icp_release_dev_in_ready,
.config_dev = __cam_icp_config_dev_in_ready,
+ .flush_dev = __cam_icp_flush_dev_in_ready,
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index c18a5e4..25e1ce7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -93,6 +93,8 @@
CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
+ } else {
+ core_info->clk_enable = true;
}
return rc;
@@ -119,9 +121,10 @@
return -EINVAL;
}
- rc = cam_bps_disable_soc_resources(soc_info);
+ rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
if (rc)
CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+ core_info->clk_enable = false;
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
@@ -276,8 +279,30 @@
uint32_t clk_rate = *(uint32_t *)cmd_args;
CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+ if (!core_info->clk_enable) {
+ cam_bps_handle_pc(bps_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ rc = cam_bps_toggle_clk(soc_info, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Enable failed");
+ else
+ core_info->clk_enable = true;
+ rc = cam_bps_handle_resume(bps_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "handle resume failed");
+ }
+ CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
rc = cam_bps_update_clk_rate(soc_info, clk_rate);
- }
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to update clk");
+ }
+ break;
+ case CAM_ICP_BPS_CMD_DISABLE_CLK:
+ if (core_info->clk_enable == true)
+ cam_bps_toggle_clk(soc_info, false);
+ core_info->clk_enable = false;
break;
default:
break;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
index 0a28bb4f..d979321 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
@@ -33,6 +33,7 @@
struct cam_bps_device_hw_info *bps_hw_info;
uint32_t cpas_handle;
bool cpas_start;
+ bool clk_enable;
};
int cam_bps_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 400e1e7..b7b636c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -72,11 +72,13 @@
return rc;
}
-int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk)
{
int rc = 0;
- rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+ false);
if (rc)
CAM_ERR(CAM_ICP, "disable platform failed");
@@ -142,3 +144,15 @@
return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
}
+
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+ int rc = 0;
+
+ if (clk_enable)
+ rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+ else
+ cam_soc_util_clk_disable_default(soc_info);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
index 2dd2c08..18f3015 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
@@ -20,7 +20,8 @@
int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
-int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk);
int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);
@@ -28,4 +29,5 @@
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
uint32_t clk_rate);
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
#endif /* _CAM_BPS_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 29a1b9a..f44fcc0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -53,6 +53,9 @@
#define ICP_WORKQ_TASK_CMD_TYPE 1
#define ICP_WORKQ_TASK_MSG_TYPE 2
+#define ICP_DEV_TYPE_TO_CLK_TYPE(dev_type) \
+ ((dev_type == CAM_ICP_RES_TYPE_BPS) ? ICP_CLK_HW_BPS : ICP_CLK_HW_IPE)
+
static struct cam_icp_hw_mgr icp_hw_mgr;
static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
@@ -60,7 +63,7 @@
struct cam_hw_intf *a5_dev_intf = NULL;
int rc;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
return -EINVAL;
@@ -225,6 +228,104 @@
return 0;
}
+static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ ctx_data->clk_info.curr_fc = 0;
+ ctx_data->clk_info.base_clk = 0;
+ ctx_data->clk_info.uncompressed_bw = 0;
+ ctx_data->clk_info.compressed_bw = 0;
+ cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+
+ return 0;
+}
+
+static int32_t cam_icp_deinit_idle_clk(void *priv, void *data)
+{
+ struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv;
+ struct clk_work_data *task_data = (struct clk_work_data *)data;
+ struct cam_icp_clk_info *clk_info =
+ (struct cam_icp_clk_info *)task_data->data;
+ uint32_t id;
+ uint32_t i;
+ uint32_t curr_clk_rate;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+
+ ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+ ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+ bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+
+ clk_info->base_clk = 0;
+ clk_info->curr_clk = 0;
+ clk_info->over_clked = 0;
+
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+ ctx_data = &hw_mgr->ctx_data[i];
+ mutex_lock(&ctx_data->ctx_mutex);
+ if ((ctx_data->state != CAM_ICP_CTX_STATE_FREE) &&
+ (ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->
+ icp_dev_acquire_info->dev_type) == clk_info->hw_type))
+ cam_icp_ctx_clk_info_init(ctx_data);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ }
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ return -EINVAL;
+ }
+
+ if (clk_info->hw_type == ICP_CLK_HW_BPS) {
+ dev_intf = bps_dev_intf;
+ id = CAM_ICP_BPS_CMD_DISABLE_CLK;
+ } else if (clk_info->hw_type == ICP_CLK_HW_IPE) {
+ dev_intf = ipe0_dev_intf;
+ id = CAM_ICP_IPE_CMD_DISABLE_CLK;
+ } else {
+ CAM_ERR(CAM_ICP, "Error");
+ return 0;
+ }
+
+ CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &curr_clk_rate, sizeof(curr_clk_rate));
+
+ if (clk_info->hw_type != ICP_CLK_HW_BPS)
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv, id,
+ &curr_clk_rate, sizeof(curr_clk_rate));
+
+ return 0;
+}
+
+static void cam_icp_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ struct crm_workq_task *task;
+ struct clk_work_data *task_data;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+ spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+ return;
+ }
+
+ task_data = (struct clk_work_data *)task->payload;
+ task_data->data = timer->parent;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_deinit_idle_clk;
+ cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
struct cam_icp_hw_ctx_data *ctx_data)
{
@@ -237,21 +338,36 @@
hw_mgr->clk_info[i].over_clked = 0;
hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ hw_mgr->clk_info[i].hw_type = i;
}
hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
return 0;
}
-static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+static int cam_icp_timer_start(struct cam_icp_hw_mgr *hw_mgr)
{
- ctx_data->clk_info.curr_fc = 0;
- ctx_data->clk_info.base_clk = 0;
- ctx_data->clk_info.uncompressed_bw = 0;
- ctx_data->clk_info.compressed_bw = 0;
- cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+ int rc = 0;
+ int i;
- return 0;
+ for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+ if (!hw_mgr->clk_info[i].watch_dog) {
+ rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
+ 3000, &hw_mgr->clk_info[i], &cam_icp_timer_cb);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+ }
+ }
+
+ return rc;
+}
+
+static void cam_icp_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+ if (!hw_mgr->bps_ctxt_cnt)
+ crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+ else if (!hw_mgr->ipe_ctxt_cnt)
+ crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
}
static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
@@ -335,7 +451,6 @@
* no need to update the clock
*/
mutex_lock(&hw_mgr->hw_mgr_mutex);
- ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
ctx_data->clk_info.base_clk = base_clk;
hw_mgr_clk_info->over_clked = 0;
if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
@@ -360,6 +475,7 @@
rc = true;
}
}
+ ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return rc;
@@ -552,10 +668,15 @@
uint64_t req_id;
struct cam_icp_clk_info *hw_mgr_clk_info;
- if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
- else
+ CAM_DBG(CAM_ICP, "Reset bps timer");
+ } else {
+ crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+ CAM_DBG(CAM_ICP, "Reset ipe timer");
+ }
if (icp_hw_mgr.icp_debug_clk)
return cam_icp_debug_clk_update(hw_mgr_clk_info);
@@ -627,9 +748,9 @@
struct cam_hw_intf *bps_dev_intf = NULL;
struct cam_hw_intf *dev_intf = NULL;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
@@ -670,9 +791,9 @@
struct cam_icp_clk_info *clk_info;
struct cam_icp_cpas_vote clk_update;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
@@ -729,9 +850,9 @@
struct cam_hw_intf *bps_dev_intf = NULL;
int rc = 0;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -793,9 +914,9 @@
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -997,6 +1118,17 @@
clear_bit(i, ctx_data->hfi_frame_process.bitmap);
}
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+ if (!hfi_frame_process->in_free_resource[i])
+ continue;
+
+ CAM_INFO(CAM_ICP, "Delete merged sync in object: %d",
+ ctx_data->hfi_frame_process.in_free_resource[i]);
+ cam_sync_destroy(
+ ctx_data->hfi_frame_process.in_free_resource[i]);
+ ctx_data->hfi_frame_process.in_free_resource[i] = 0;
+ }
+
return 0;
}
@@ -1009,6 +1141,7 @@
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
struct hfi_frame_process_info *hfi_frame_process;
struct cam_hw_done_event_data buf_data;
+ uint32_t clk_type;
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
request_id = ioconfig_ack->user_data2;
@@ -1020,6 +1153,10 @@
CAM_DBG(CAM_ICP, "ctx : %pK, request_id :%lld",
(void *)ctx_data->context_priv, request_id);
+ clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->icp_dev_acquire_info->
+ dev_type);
+ crm_timer_reset(icp_hw_mgr.clk_info[clk_type].watch_dog);
+
mutex_lock(&ctx_data->ctx_mutex);
if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
mutex_unlock(&ctx_data->ctx_mutex);
@@ -1615,7 +1752,7 @@
unsigned long rem_jiffies;
int timeout = 5000;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
return -EINVAL;
@@ -1646,9 +1783,9 @@
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
return 0;
@@ -1670,7 +1807,7 @@
CAM_DBG(CAM_ICP, "ENTER");
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
return -EINVAL;
@@ -1692,7 +1829,7 @@
struct cam_hw_info *a5_dev = NULL;
struct hfi_mem_info hfi_mem;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
return -EINVAL;
@@ -1740,7 +1877,7 @@
struct cam_hw_intf *a5_dev_intf = NULL;
CAM_DBG(CAM_ICP, "Enter");
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
@@ -1819,7 +1956,7 @@
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- CAM_DBG(CAM_ICP, "FW timeout/err in abort handle command");
+ CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
}
kfree(abort_cmd);
@@ -1941,10 +2078,10 @@
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
@@ -1974,7 +2111,7 @@
return 0;
}
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2019,10 +2156,10 @@
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
CAM_ERR(CAM_ICP, "dev intfs are wrong");
@@ -2067,7 +2204,7 @@
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
return -EINVAL;
@@ -2116,7 +2253,7 @@
struct cam_hw_info *a5_dev = NULL;
struct hfi_mem_info hfi_mem;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
return -EINVAL;
@@ -2158,7 +2295,7 @@
unsigned long rem_jiffies;
int timeout = 5000;
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
return -EINVAL;
@@ -2203,7 +2340,7 @@
return rc;
}
- a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev_intf = hw_mgr->a5_dev_intf;
if (!a5_dev_intf) {
CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2702,6 +2839,175 @@
clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
}
mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+}
+
+static int cam_icp_mgr_delete_sync(void *priv, void *data)
+{
+ struct hfi_cmd_work_data *task_data = NULL;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)data;
+ ctx_data = task_data->data;
+
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Null Context");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->in_free_resource[idx])
+ continue;
+ //cam_sync_destroy(
+ //ctx_data->hfi_frame_process.in_free_resource[idx]);
+ ctx_data->hfi_frame_process.in_free_resource[idx] = 0;
+ }
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+}
+
+static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *task_data;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)ctx_data;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_delete_sync;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+ bool clear_in_resource = false;
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ ctx_data->hfi_frame_process.in_free_resource[idx] =
+ ctx_data->hfi_frame_process.in_resource[idx];
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ clear_in_resource = true;
+ }
+
+ if (clear_in_resource)
+ cam_icp_mgr_delete_sync_obj(ctx_data);
+
+ return 0;
+}
+
+static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ int64_t request_id;
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+ bool clear_in_resource = false;
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ request_id = *(int64_t *)flush_args->flush_req_pending[0];
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ if (hfi_frame_process->request_id[idx] != request_id)
+ continue;
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ ctx_data->hfi_frame_process.in_free_resource[idx] =
+ ctx_data->hfi_frame_process.in_resource[idx];
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ clear_in_resource = true;
+ }
+
+ if (clear_in_resource)
+ cam_icp_mgr_delete_sync_obj(ctx_data);
+
+ return 0;
+}
+
+static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+ struct cam_hw_flush_args *flush_args = hw_flush_args;
+ struct cam_icp_hw_ctx_data *ctx_data;
+
+ if ((!hw_priv) || (!hw_flush_args)) {
+ CAM_ERR(CAM_ICP, "Input params are Null:");
+ return -EINVAL;
+ }
+
+ ctx_data = flush_args->ctxt_to_hw_map;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Ctx data is NULL");
+ return -EINVAL;
+ }
+
+ if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+ (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+ CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+ flush_args->flush_type);
+ return -EINVAL;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_ALL:
+ if (flush_args->num_req_active)
+ cam_icp_mgr_abort_handle(ctx_data);
+ mutex_lock(&ctx_data->ctx_mutex);
+ cam_icp_mgr_flush_all(ctx_data, flush_args);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ break;
+ case CAM_FLUSH_TYPE_REQ:
+ mutex_lock(&ctx_data->ctx_mutex);
+ if (flush_args->num_req_active) {
+ CAM_ERR(CAM_ICP, "Flush request is not supported");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+ if (flush_args->num_req_pending)
+ cam_icp_mgr_flush_req(ctx_data, flush_args);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+ flush_args->flush_type);
+ return -EINVAL;
+ }
return 0;
}
@@ -2754,6 +3060,9 @@
}
mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
+ cam_icp_timer_stop(hw_mgr);
+
return rc;
}
@@ -3043,6 +3352,10 @@
goto ubwc_cfg_failed;
}
}
+
+ if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
+ cam_icp_timer_start(hw_mgr);
+
rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
if (rc) {
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -3276,6 +3589,13 @@
of_node_put(child_node);
}
+ icp_hw_mgr.a5_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_A5][0];
+ icp_hw_mgr.bps_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_BPS][0];
+ icp_hw_mgr.ipe0_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_IPE][0];
+ if (icp_hw_mgr.ipe1_enable)
+ icp_hw_mgr.ipe1_dev_intf =
+ icp_hw_mgr.devices[CAM_ICP_DEV_IPE][1];
+
return 0;
compat_hw_name_failed:
kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
@@ -3360,6 +3680,7 @@
hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
hw_mgr_intf->hw_open = cam_icp_mgr_hw_open;
hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
+ hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
mutex_init(&icp_hw_mgr.hw_mgr_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e8919e8..43d7a4a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -25,6 +25,7 @@
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
#define CAM_ICP_ROLE_PARENT 1
#define CAM_ICP_ROLE_CHILD 2
@@ -111,6 +112,16 @@
};
/**
+ * struct clk_work_data
+ * @type: Task type
+ * @data: Pointer to clock info
+ */
+struct clk_work_data {
+ uint32_t type;
+ void *data;
+};
+
+/**
* struct hfi_frame_process_info
* @hfi_frame_cmd: Frame process command info
* @bitmap: Bitmap for hfi_frame_cmd
@@ -131,6 +142,7 @@
uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
uint32_t in_resource[CAM_FRAME_CMD_MAX];
+ uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
};
@@ -206,8 +218,10 @@
* @curr_clk: Current clock of hadrware
* @threshold: Threshold for overclk count
* @over_clked: Over clock count
- * #uncompressed_bw: Current bandwidth voting
+ * @uncompressed_bw: Current bandwidth voting
* @compressed_bw: Current compressed bandwidth voting
+ * @hw_type: IPE/BPS device type
+ * @watch_dog: watchdog timer handle
*/
struct cam_icp_clk_info {
uint32_t base_clk;
@@ -216,6 +230,8 @@
uint32_t over_clked;
uint64_t uncompressed_bw;
uint64_t compressed_bw;
+ uint32_t hw_type;
+ struct cam_req_mgr_timer *watch_dog;
};
/**
@@ -290,6 +306,10 @@
bool ipe1_enable;
bool bps_enable;
uint32_t core_info;
+ struct cam_hw_intf *a5_dev_intf;
+ struct cam_hw_intf *ipe0_dev_intf;
+ struct cam_hw_intf *ipe1_dev_intf;
+ struct cam_hw_intf *bps_dev_intf;
};
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
index d79187f..4f07172 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
@@ -27,6 +27,7 @@
CAM_ICP_BPS_CMD_CPAS_START,
CAM_ICP_BPS_CMD_CPAS_STOP,
CAM_ICP_BPS_CMD_UPDATE_CLK,
+ CAM_ICP_BPS_CMD_DISABLE_CLK,
CAM_ICP_BPS_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
index 697757e..0943bef 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
@@ -27,6 +27,7 @@
CAM_ICP_IPE_CMD_CPAS_START,
CAM_ICP_IPE_CMD_CPAS_STOP,
CAM_ICP_IPE_CMD_UPDATE_CLK,
+ CAM_ICP_IPE_CMD_DISABLE_CLK,
CAM_ICP_IPE_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 8630e34..5b4156a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -91,6 +91,8 @@
CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
+ } else {
+ core_info->clk_enable = true;
}
return rc;
@@ -117,9 +119,10 @@
return -EINVAL;
}
- rc = cam_ipe_disable_soc_resources(soc_info);
+ rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable);
if (rc)
CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
+ core_info->clk_enable = false;
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
@@ -267,8 +270,31 @@
uint32_t clk_rate = *(uint32_t *)cmd_args;
CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
- rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+ if (!core_info->clk_enable) {
+ cam_ipe_handle_pc(ipe_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ rc = cam_ipe_toggle_clk(soc_info, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Enable failed");
+ else
+ core_info->clk_enable = true;
+ rc = cam_ipe_handle_resume(ipe_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "handle resume failed");
}
+ CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+
+ rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to update clk");
+ }
+ break;
+ case CAM_ICP_IPE_CMD_DISABLE_CLK:
+ if (core_info->clk_enable == true)
+ cam_ipe_toggle_clk(soc_info, false);
+ core_info->clk_enable = false;
break;
default:
break;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
index bd83972..65d3490 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
@@ -33,6 +33,7 @@
struct cam_ipe_device_hw_info *ipe_hw_info;
uint32_t cpas_handle;
bool cpas_start;
+ bool clk_enable;
};
int cam_ipe_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 71af1a2..289d7d4 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -125,11 +125,13 @@
return rc;
}
-int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk)
{
int rc = 0;
- rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+ false);
if (rc)
CAM_ERR(CAM_ICP, "enable platform failed");
@@ -145,3 +147,15 @@
return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
}
+
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+ int rc = 0;
+
+ if (clk_enable)
+ rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+ else
+ cam_soc_util_clk_disable_default(soc_info);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
index 8e5a38a..5385bde 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
@@ -20,7 +20,8 @@
int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
-int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk);
int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info);
@@ -28,4 +29,5 @@
int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
uint32_t clk_rate);
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
#endif /* CAM_IPE_SOC_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 187aeaf..4a7a4f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -177,7 +177,10 @@
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
- "No tasklet_cmd is free in queue\n");
+ "No tasklet_cmd is free in queue");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1]);
return rc;
}
@@ -431,7 +434,10 @@
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
- "No tasklet_cmd is free in queue\n");
+ "No tasklet_cmd is free in queue");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1]);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index a2fbbd7..c166113 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1169,13 +1169,19 @@
rsrc_data = wm_res->res_priv;
- CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
- CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+ CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"No tasklet_cmd is free in queue");
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1],
+ th_payload->evt_status_arr[2]);
+
return rc;
}
@@ -1665,14 +1671,20 @@
rsrc_data = comp_grp->res_priv;
- CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
- CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
- CAM_DBG(CAM_ISP, "IRQ status_2 = %x", th_payload->evt_status_arr[2]);
+ CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+ CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"No tasklet_cmd is free in queue");
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1],
+ th_payload->evt_status_arr[2]);
+
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 4589a22..1ccef0d 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -51,6 +51,18 @@
return rc;
}
+static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to flush device");
+
+ return rc;
+}
+
static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
struct cam_config_dev_cmd *cmd)
{
@@ -100,6 +112,7 @@
.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
+ .flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired,
},
.crm_ops = { },
.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index e401549..65922dd 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -556,6 +556,7 @@
p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
request_id = (uint64_t)config_args->priv;
+ p_cfg_req->req_id = request_id;
hw_update_entries = config_args->hw_update_entries;
CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %lld",
ctx_data, request_id, (uint64_t)config_args->priv);
@@ -779,13 +780,92 @@
hw_cfg_args.ctxt_to_hw_map != ctx_data)
continue;
- CAM_INFO(CAM_JPEG, "deleting req %pK", cfg_req);
list_del_init(&cfg_req->list);
}
return rc;
}
+
+static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
+ struct cam_jpeg_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_cfg_req *cfg_req, *req_temp;
+ int64_t request_id;
+
+ if (!hw_mgr || !ctx_data || !flush_args) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ request_id = *(int64_t *)flush_args->flush_req_pending[0];
+ list_for_each_entry_safe(cfg_req, req_temp,
+ &hw_mgr->hw_config_req_list, list) {
+ if (cfg_req->hw_cfg_args.ctxt_to_hw_map
+ != ctx_data)
+ continue;
+
+ if (cfg_req->req_id != request_id)
+ continue;
+
+ list_del_init(&cfg_req->list);
+ }
+
+ return 0;
+}
+
+static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
+{
+ int rc = 0;
+ struct cam_hw_flush_args *flush_args = flush_hw_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+ if (!hw_mgr || !flush_args || !flush_args->ctxt_to_hw_map) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)flush_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+ (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+ CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+ flush_args->flush_type);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_ALL:
+ rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+		if (rc)
+ CAM_ERR(CAM_JPEG, "Flush failed %d", rc);
+ break;
+ case CAM_FLUSH_TYPE_REQ:
+ rc = cam_jpeg_mgr_flush_req(hw_mgr_priv, ctx_data, flush_args);
+		CAM_DBG(CAM_JPEG, "Flush per request done, rc %d", rc);
+ break;
+ default:
+ CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+ flush_args->flush_type);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
{
int rc;
@@ -1281,6 +1361,7 @@
hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+ hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
index 9e3418d..dce47d2 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -75,11 +75,13 @@
* @list_head: List head
* @hw_cfg_args: Hw config args
* @dev_type: Dev type for cfg request
+ * @req_id: Request Id
*/
struct cam_jpeg_hw_cfg_req {
struct list_head list;
struct cam_hw_config_args hw_cfg_args;
uint32_t dev_type;
+ int64_t req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 0aa5ade..1ab3143 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -91,6 +91,17 @@
return rc;
}
+static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to flush device");
+
+ return rc;
+}
static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
@@ -187,6 +198,7 @@
.config_dev = __cam_lrme_ctx_config_dev_in_activated,
.release_dev = __cam_lrme_ctx_release_dev_in_activated,
.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+ .flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
},
.crm_ops = {},
.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index 448086d..20b8586 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -648,6 +648,86 @@
return rc;
}
+static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
+{ int rc = 0, i;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_flush_args *args;
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_frame_request *frame_req = NULL, *req_to_flush = NULL;
+ struct cam_lrme_frame_request **req_list = NULL;
+ uint32_t device_index;
+ struct cam_lrme_hw_flush_args lrme_flush_args;
+ uint32_t priority;
+
+ if (!hw_mgr_priv || !hw_flush_args) {
+ CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+ hw_mgr_priv, hw_flush_args);
+ return -EINVAL;
+ }
+
+ args = (struct cam_hw_flush_args *)hw_flush_args;
+ device_index = ((uint64_t)args->ctxt_to_hw_map & 0xF);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+ return -EPERM;
+ }
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+ goto end;
+ }
+
+ req_list = (struct cam_lrme_frame_request **)args->flush_req_pending;
+ for (i = 0; i < args->num_req_pending; i++) {
+ frame_req = req_list[i];
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req->frame_list, &hw_mgr->free_req_lock);
+ }
+
+ req_list = (struct cam_lrme_frame_request **)args->flush_req_active;
+ for (i = 0; i < args->num_req_active; i++) {
+ frame_req = req_list[i];
+ priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+ spin_lock((priority == CAM_LRME_PRIORITY_HIGH) ?
+ &hw_device->high_req_lock :
+ &hw_device->normal_req_lock);
+ if (!list_empty(&frame_req->frame_list)) {
+ list_del_init(&frame_req->frame_list);
+ cam_lrme_mgr_util_put_frame_req(
+ &hw_mgr->frame_free_list,
+ &frame_req->frame_list,
+ &hw_mgr->free_req_lock);
+ } else
+ req_to_flush = frame_req;
+ spin_unlock((priority == CAM_LRME_PRIORITY_HIGH) ?
+ &hw_device->high_req_lock :
+ &hw_device->normal_req_lock);
+ }
+ if (!req_to_flush)
+ goto end;
+ if (hw_device->hw_intf.hw_ops.flush) {
+ lrme_flush_args.ctxt_to_hw_map = req_to_flush->ctxt_to_hw_map;
+ lrme_flush_args.flush_type = args->flush_type;
+ lrme_flush_args.req_to_flush = req_to_flush;
+ rc = hw_device->hw_intf.hw_ops.flush(hw_device->hw_intf.hw_priv,
+ &lrme_flush_args,
+ sizeof(lrme_flush_args));
+ if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW flush %d", rc);
+ goto end;
+ }
+ } else {
+		CAM_ERR(CAM_LRME, "No flush ops");
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
+
static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
{
int rc = 0;
@@ -1026,6 +1106,7 @@
hw_mgr_intf->hw_read = NULL;
hw_mgr_intf->hw_write = NULL;
hw_mgr_intf->hw_close = NULL;
+ hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush;
g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index dbd969c..3fc9032 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -411,6 +411,131 @@
return rc;
}
+static int cam_lrme_hw_util_flush_ctx(struct cam_hw_info *lrme_hw,
+ void *ctxt_to_hw_map)
+{
+ int rc = -ENODEV;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_cb_args cb_args;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_submit_args submit_args;
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "reset failed");
+ return rc;
+ }
+
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+
+ if (req_submit && req_submit->ctxt_to_hw_map == ctxt_to_hw_map) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_submit;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+ hw_mgr_cb.data, &cb_args);
+ } else if (req_submit) {
+ submit_args.frame_req = req_submit;
+ submit_args.hw_update_entries = req_submit->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_submit->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_submit;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ if (req_proc && req_proc->ctxt_to_hw_map == ctxt_to_hw_map) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_proc;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+ hw_mgr_cb.data, &cb_args);
+ } else if (req_proc) {
+ submit_args.frame_req = req_proc;
+ submit_args.hw_update_entries = req_proc->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_proc->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_proc;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ return rc;
+}
+
+static int cam_lrme_hw_util_flush_req(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_frame_request *req_to_flush)
+{
+ int rc = -ENODEV;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_cb_args cb_args;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_submit_args submit_args;
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "reset failed");
+ return rc;
+ }
+
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+
+ if (req_submit && req_submit == req_to_flush) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_submit;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+ hw_mgr_cb.data, &cb_args);
+ } else if (req_submit) {
+ submit_args.frame_req = req_submit;
+ submit_args.hw_update_entries = req_submit->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_submit->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_submit;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ if (req_proc && req_proc == req_to_flush) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_proc;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+ hw_mgr_cb.data, &cb_args);
+ } else if (req_proc) {
+ submit_args.frame_req = req_proc;
+ submit_args.hw_update_entries = req_proc->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_proc->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_proc;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ return rc;
+}
+
+
static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
{
struct cam_lrme_core *lrme_core = lrme_hw->core_info;
@@ -595,7 +720,10 @@
if (top_irq_status & (1 << 4)) {
CAM_DBG(CAM_LRME, "IDLE");
-
+ if (!lrme_core->req_proc) {
+ CAM_DBG(CAM_LRME, "No frame request to process idle");
+ goto end;
+ }
rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
if (rc) {
CAM_ERR(CAM_LRME, "Process idle failed");
@@ -868,6 +996,81 @@
return 0;
}
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size)
+{
+ struct cam_lrme_core *lrme_core = NULL;
+ struct cam_hw_info *lrme_hw = hw_priv;
+ struct cam_lrme_hw_flush_args *flush_args =
+ (struct cam_lrme_hw_flush_args *)hw_flush_args;
+ int rc = -ENODEV;
+
+ if (!hw_priv) {
+ CAM_ERR(CAM_LRME, "Invalid arguments %pK", hw_priv);
+ return -EINVAL;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ if (lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING &&
+ lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND) {
+ mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "Flush not needed in %d state",
+ lrme_core->state);
+ return 0;
+ }
+
+ if (!lrme_core->req_proc && !lrme_core->req_submit) {
+ mutex_unlock(&lrme_hw->hw_mutex);
+ CAM_DBG(CAM_LRME, "no req in device");
+ return 0;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_ALL:
+ if ((!lrme_core->req_submit ||
+ lrme_core->req_submit->ctxt_to_hw_map !=
+ flush_args->ctxt_to_hw_map) &&
+ (!lrme_core->req_proc ||
+ lrme_core->req_proc->ctxt_to_hw_map !=
+ flush_args->ctxt_to_hw_map)) {
+ mutex_unlock(&lrme_hw->hw_mutex);
+ CAM_DBG(CAM_LRME, "hw running on different ctx");
+ return 0;
+ }
+ rc = cam_lrme_hw_util_flush_ctx(lrme_hw,
+ flush_args->ctxt_to_hw_map);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Flush all failed");
+ break;
+
+ case CAM_FLUSH_TYPE_REQ:
+ if ((!lrme_core->req_submit ||
+ lrme_core->req_submit != flush_args->req_to_flush) &&
+ (!lrme_core->req_proc ||
+ lrme_core->req_proc != flush_args->req_to_flush)) {
+ mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different req");
+ return 0;
+ }
+ rc = cam_lrme_hw_util_flush_req(lrme_hw,
+ flush_args->req_to_flush);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Flush req failed");
+ break;
+
+ default:
+ CAM_ERR(CAM_LRME, "Unsupported flush type");
+ break;
+ }
+
+ mutex_unlock(&lrme_hw->hw_mutex);
+
+ return rc;
+}
+
int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
uint32_t arg_size)
{
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
index 2e63752..da42c84 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -201,6 +201,7 @@
lrme_hw_intf.hw_ops.read = NULL;
lrme_hw_intf.hw_ops.write = NULL;
lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+ lrme_hw_intf.hw_ops.flush = cam_lrme_hw_flush;
lrme_hw_intf.hw_type = CAM_HW_LRME;
rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 46e9d5d..3d230af 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -31,6 +31,7 @@
idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
if (idx >= CAM_SYNC_MAX_OBJS)
return -ENOMEM;
+ CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
bit = test_and_set_bit(idx, sync_dev->bitmap);
} while (bit);
@@ -97,6 +98,8 @@
INIT_WORK(&sync_cb->cb_dispatch_work,
cam_sync_util_cb_dispatch);
sync_cb->status = row->state;
+ CAM_DBG(CAM_SYNC, "Callback trigger for sync object:%d",
+ sync_cb->sync_obj);
queue_work(sync_dev->work_queue,
&sync_cb->cb_dispatch_work);
@@ -134,6 +137,8 @@
return -EINVAL;
}
+ CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
+ sync_obj);
list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
if (sync_cb->callback_func == cb_func &&
sync_cb->cb_data == userdata) {
@@ -202,6 +207,9 @@
rc = cam_sync_util_add_to_signalable_list(sync_obj, status, &sync_list);
if (rc < 0) {
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ CAM_ERR(CAM_SYNC,
+ "Error: Unable to add sync object :%d to signalable list",
+ sync_obj);
return rc;
}
@@ -261,6 +269,7 @@
}
/* Dispatch kernel callbacks if any were registered earlier */
+
list_for_each_entry_safe(sync_cb,
temp_sync_cb, &signalable_row->callback_list, list) {
sync_cb->status = list_info->status;
@@ -347,7 +356,7 @@
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
-
+ CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
*merged_obj = idx;
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index afac68d..ed69829 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -51,6 +51,8 @@
init_completion(&row->signaled);
INIT_LIST_HEAD(&row->callback_list);
INIT_LIST_HEAD(&row->user_payload_list);
+ CAM_DBG(CAM_SYNC, "Sync object Initialised: sync_id:%u row_state:%u ",
+ row->sync_id, row->state);
return 0;
}
@@ -215,6 +217,7 @@
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
+ row->state = CAM_SYNC_STATE_INVALID;
/* Object's child and parent objects will be added into this list */
INIT_LIST_HEAD(&temp_child_list);
@@ -303,7 +306,6 @@
kfree(sync_cb);
}
- row->state = CAM_SYNC_STATE_INVALID;
memset(row, 0, sizeof(*row));
clear_bit(idx, sync_dev->bitmap);
INIT_LIST_HEAD(&row->callback_list);
@@ -312,6 +314,7 @@
INIT_LIST_HEAD(&row->user_payload_list);
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
return 0;
}
@@ -349,6 +352,8 @@
memcpy(payload_data, payload, len);
v4l2_event_queue(sync_dev->vdev, &event);
+ CAM_DBG(CAM_SYNC, "send v4l2 event for sync_obj :%d",
+ sync_obj);
}
int cam_sync_util_validate_merge(uint32_t *sync_obj, uint32_t num_objs)
@@ -391,6 +396,8 @@
signalable_info->status = status;
list_add_tail(&signalable_info->list, sync_list);
+ CAM_DBG(CAM_SYNC, "Add sync_obj :%d with status :%d to signalable list",
+ sync_obj, status);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 786107b..bd56310 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -325,7 +325,7 @@
*
* @return: success or failure
*/
-static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
enum cam_vote_level clk_level)
{
int i, rc = 0;
@@ -372,7 +372,7 @@
*
* @return: success or failure
*/
-static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
{
int i;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 4a87d50..4b57d54 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -611,4 +611,9 @@
int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
uint32_t base_index, uint32_t offset, int size);
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info);
+
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level);
+
#endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index fd031d7..fa5eb49 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -588,7 +588,7 @@
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
uint32_t sid_info;
- struct scm_desc desc;
+ struct scm_desc desc = {0};
unsigned int resp = 0;
int ret = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 7c36934..9f2ce5f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -711,7 +711,7 @@
u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
REGDMA_INT_2_MASK);
u32 last_ts[ROT_QUEUE_MAX] = {0,};
- u32 latest_ts;
+ u32 latest_ts, opmode;
int elapsed_time, t;
int i, j;
unsigned long flags;
@@ -723,7 +723,15 @@
/* sw reset the hw rotator */
SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
+ /* ensure write is issued to the rotator HW */
+ wmb();
usleep_range(MS_TO_US(10), MS_TO_US(20));
+
+ /* force rotator into offline mode */
+ opmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
+ SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_OP_MODE,
+ opmode & ~(BIT(5) | BIT(4) | BIT(1) | BIT(0)));
+
SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
/* halt vbif xin client to ensure no pending transaction */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 3f7e7bb..146ca6f 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4111,9 +4111,15 @@
ion_phys_addr_t pa;
struct ion_handle *ihandle = NULL;
u8 *img_data = NULL;
+ int retry = 0;
- ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
- SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ do {
+ if (retry++)
+ msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
+ ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
+ SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
+ } while (IS_ERR_OR_NULL(ihandle) &&
+ (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("ION alloc failed\n");
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ba140ea..5b5ad0f 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -80,6 +80,14 @@
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+ Qualcomm Technologies, Inc. PMICs. It provides the clients
+ an interface to read/write to the SDAM module's shared memory.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 8f942a0..1e99e8c 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -18,6 +18,7 @@
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += qcom-spmi-sdam.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 0000000..01a89cb
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START 0x40
+#define REGISTER_MAP_ID 0x40
+#define REGISTER_MAP_VERSION 0x41
+#define SDAM_SIZE 0x44
+#define SDAM_PBS_TRIG_SET 0xE5
+#define SDAM_PBS_TRIG_CLR 0xE6
+
+struct sdam_chip {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ int base;
+ int size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+ REGISTER_MAP_ID,
+ REGISTER_MAP_VERSION,
+ SDAM_SIZE
+};
+
+static bool is_valid(struct sdam_chip *sdam, unsigned int offset, size_t len)
+{
+ int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+ if (!len)
+ return false;
+
+ if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+ && (offset + len - 1) <= sdam_mem_end)
+ return true;
+ else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+ && (len == 1))
+ return true;
+
+ return false;
+}
+
+static bool is_ro(unsigned int offset, size_t len)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+ if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+ return true;
+
+ return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ int rc;
+
+ if (!is_valid(sdam, offset, bytes)) {
+ pr_err("Invalid SDAM offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ pr_err("Failed to read SDAM offset 0x%02x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ int rc;
+
+ if (!is_valid(sdam, offset, bytes)) {
+ pr_err("Invalid SDAM offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ if (is_ro(offset, bytes)) {
+ pr_err("Invalid write offset 0x%02x len=%zd\n", offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ pr_err("Failed to write SDAM offset 0x%02x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+ struct sdam_chip *sdam;
+ struct nvmem_device *nvmem;
+ struct nvmem_config *sdam_config;
+ unsigned int val = 0;
+ int rc;
+
+ sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+ if (!sdam)
+ return -ENOMEM;
+
+ sdam_config = devm_kzalloc(&pdev->dev, sizeof(*sdam_config),
+ GFP_KERNEL);
+ if (!sdam_config)
+ return -ENOMEM;
+
+ sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sdam->regmap) {
+ pr_err("Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+ if (rc < 0) {
+ pr_err("Failed to get SDAM base, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+ if (rc < 0) {
+ pr_err("Failed to read SDAM_SIZE rc=%d\n", rc);
+ return -EINVAL;
+ }
+ sdam->size = val * 32;
+
+ sdam_config->dev = &pdev->dev;
+ sdam_config->name = "spmi_sdam";
+ sdam_config->id = pdev->id;
+	sdam_config->owner = THIS_MODULE;
+ sdam_config->stride = 1;
+ sdam_config->word_size = 1;
+ sdam_config->reg_read = sdam_read;
+ sdam_config->reg_write = sdam_write;
+ sdam_config->priv = sdam;
+
+ nvmem = nvmem_register(sdam_config);
+ if (IS_ERR(nvmem)) {
+ pr_err("Failed to register SDAM nvmem device rc=%ld\n",
+ PTR_ERR(nvmem));
+ return -ENXIO;
+ }
+ platform_set_drvdata(pdev, nvmem);
+
+ pr_info("SDAM base=0x%04x size=%d registered successfully\n",
+ sdam->base, sdam->size);
+
+ return 0;
+}
+
+static int sdam_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id sdam_match_table[] = {
+ {.compatible = "qcom,spmi-sdam"},
+ {},
+};
+
+static struct platform_driver sdam_driver = {
+ .driver = {
+ .name = "qcom,spmi-sdam",
+ .owner = THIS_MODULE,
+ .of_match_table = sdam_match_table,
+ },
+ .probe = sdam_probe,
+ .remove = sdam_remove,
+};
+
+static int __init sdam_init(void)
+{
+ return platform_driver_register(&sdam_driver);
+}
+subsys_initcall(sdam_init);
+
+static void __exit sdam_exit(void)
+{
+ return platform_driver_unregister(&sdam_driver);
+}
+module_exit(sdam_exit);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 9e0989c..fd5c515 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -5560,7 +5560,7 @@
u32 val;
pci_read_config_dword(child_pdev,
- pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
+ child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
child_l0s_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L0S);
if (child_l0s_enable)
break;
@@ -5608,7 +5608,7 @@
u32 val;
pci_read_config_dword(child_pdev,
- pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
+ child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
child_l1_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L1);
if (child_l1_enable)
break;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 7a683ec..906e911 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -19,7 +19,9 @@
#include <linux/ipa_qmi_service_v01.h>
#include <linux/ipa_mhi.h>
#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
#include "../ipa_v3/ipa_pm.h"
+#endif
#define IPA_MHI_DRV_NAME "ipa_mhi_client"
#define IPA_MHI_DBG(fmt, args...) \
@@ -842,6 +844,7 @@
IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
ipa_mhi_client_ctx->event_context_array_addr);
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
@@ -854,6 +857,7 @@
goto fail_pm_activate_modem;
}
} else {
+#endif
/* Add MHI <-> Q6 dependencies to IPA RM */
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
@@ -874,7 +878,9 @@
IPA_MHI_ERR("failed request prod %d\n", res);
goto fail_request_prod;
}
+#ifdef CONFIG_IPA3
}
+#endif
/* gsi params */
init_params.gsi.first_ch_idx =
@@ -912,12 +918,14 @@
ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
fail_add_mhi_q6_dep:
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
+#endif
ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
return res;
}
@@ -2124,6 +2132,7 @@
*/
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
@@ -2136,6 +2145,7 @@
goto fail_deactivate_modem_pm;
}
} else {
+#endif
IPA_MHI_DBG("release prod\n");
res = ipa_mhi_release_prod();
if (res) {
@@ -2149,7 +2159,9 @@
IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n");
goto fail_release_cons;
}
+#ifdef CONFIG_IPA3
}
+#endif
usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
res = ipa_mhi_suspend_dl(force);
@@ -2176,12 +2188,14 @@
if (!ipa_pm_is_used())
ipa_mhi_request_prod();
fail_release_prod:
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_deactivate_modem_pm:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_deactivate_pm:
+#endif
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
@@ -2241,6 +2255,7 @@
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
@@ -2253,12 +2268,15 @@
goto fail_pm_activate_modem;
}
} else {
+#endif
res = ipa_mhi_request_prod();
if (res) {
IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
goto fail_request_prod;
}
+#ifdef CONFIG_IPA3
}
+#endif
/* resume all UL channels */
res = ipa_mhi_resume_channels(false,
@@ -2298,12 +2316,14 @@
if (!ipa_pm_is_used())
ipa_mhi_release_prod();
fail_request_prod:
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
+#endif
ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
fail_resume_dl_channels:
ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
@@ -2455,6 +2475,7 @@
ipa_assert();
}
+#ifdef CONFIG_IPA3
static void ipa_mhi_deregister_pm(void)
{
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
@@ -2465,7 +2486,7 @@
ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
ipa_mhi_client_ctx->modem_pm_hdl = ~0;
}
-
+#endif
/**
* ipa_mhi_destroy() - Destroy MHI IPA
*
@@ -2498,9 +2519,11 @@
ipa_uc_mhi_cleanup();
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ipa_mhi_deregister_pm();
else
+#endif
ipa_mhi_delete_rm_resources();
ipa_dma_destroy();
@@ -2516,6 +2539,7 @@
ipa_assert();
}
+#ifdef CONFIG_IPA3
static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
{
unsigned long flags;
@@ -2587,7 +2611,7 @@
ipa_mhi_client_ctx->pm_hdl = ~0;
return res;
}
-
+#endif
static int ipa_mhi_create_rm_resources(void)
{
int res;
@@ -2725,9 +2749,11 @@
goto fail_dma_init;
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
res = ipa_mhi_register_pm();
else
+#endif
res = ipa_mhi_create_rm_resources();
if (res) {
IPA_MHI_ERR("failed to create RM resources\n");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index e19d297..f0c67b1 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -13,7 +13,9 @@
#include <linux/ipa_uc_offload.h>
#include <linux/msm_ipa.h>
#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
#include "../ipa_v3/ipa_pm.h"
+#endif
#define IPA_NTN_DMA_POOL_ALIGNMENT 8
#define OFFLOAD_DRV_NAME "ipa_uc_offload"
@@ -115,6 +117,7 @@
return 0;
}
+#ifdef CONFIG_IPA3
static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
{
/* suspend/resume is not supported */
@@ -156,6 +159,7 @@
ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
ipa_pm_deregister(ntn_ctx->pm_hdl);
}
+#endif
static int ipa_uc_offload_ntn_create_rm_resources(
struct ipa_uc_offload_ctx *ntn_ctx)
{
@@ -203,9 +207,11 @@
IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
inp->netdev_name);
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
else
+#endif
ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
@@ -289,12 +295,16 @@
fail:
kfree(hdr);
fail_alloc:
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
+#endif
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+#ifdef CONFIG_IPA3
}
+#endif
return ret;
}
@@ -412,6 +422,7 @@
return -EINVAL;
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
if (result) {
@@ -419,6 +430,7 @@
return result;
}
} else {
+#endif
result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result) {
@@ -440,7 +452,9 @@
result = -EFAULT;
goto fail;
}
+#ifdef CONFIG_IPA3
}
+#endif
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
@@ -529,11 +543,12 @@
return -EINVAL;
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
return ipa_pm_set_perf_profile(
ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
profile->max_supported_bw_mbps);
-
+#endif
if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
return -EFAULT;
@@ -550,6 +565,7 @@
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
if (ret) {
@@ -558,6 +574,7 @@
return -EFAULT;
}
} else {
+#endif
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
@@ -571,7 +588,9 @@
IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
return -EFAULT;
}
+#ifdef CONFIG_IPA3
}
+#endif
ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
@@ -627,9 +646,11 @@
int len, result = 0;
struct ipa_ioc_del_hdr *hdr;
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
+#endif
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
return -EFAULT;
@@ -639,8 +660,9 @@
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
return -EFAULT;
}
+#ifdef CONFIG_IPA3
}
-
+#endif
len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
hdr = kzalloc(len, GFP_KERNEL);
if (hdr == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index f0d1102..df546cd 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -27,7 +27,9 @@
#include <linux/cdev.h>
#include <linux/ipa_odu_bridge.h>
#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
#include "../ipa_v3/ipa_pm.h"
+#endif
#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge"
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index 583c0ac8..4f60896 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -28,7 +28,9 @@
#include <linux/rndis_ipa.h>
#include <linux/workqueue.h>
#include "../ipa_common_i.h"
+#ifdef CONFIG_IPA3
#include "../ipa_v3/ipa_pm.h"
+#endif
#define CREATE_TRACE_POINTS
#include "rndis_ipa_trace.h"
@@ -240,8 +242,10 @@
unsigned long data);
static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+#ifdef CONFIG_IPA3
static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
+#endif
static bool rx_filter(struct sk_buff *skb);
static bool tx_filter(struct sk_buff *skb);
static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
@@ -721,9 +725,11 @@
return -EINVAL;
}
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
else
+#endif
result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
if (result) {
RNDIS_IPA_ERROR("fail on RM create\n");
@@ -787,9 +793,11 @@
return 0;
fail:
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
else
+#endif
rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
fail_create_rm:
return result;
@@ -1262,9 +1270,11 @@
rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
else
+#endif
retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
if (retval) {
RNDIS_IPA_ERROR("Fail to clean RM\n");
@@ -1822,6 +1832,7 @@
return result;
}
+#ifdef CONFIG_IPA3
static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
{
struct rndis_ipa_dev *rndis_ipa_ctx = p;
@@ -1844,7 +1855,7 @@
RNDIS_IPA_LOG_EXIT();
}
-
+#endif
/**
* rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
* the resource done on rndis_ipa_create_rm_resource()
@@ -1904,6 +1915,7 @@
return result;
}
+#ifdef CONFIG_IPA3
static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
{
int result;
@@ -1930,7 +1942,7 @@
rndis_ipa_ctx->pm_hdl = ~0;
return 0;
}
-
+#endif
/**
* resource_request() - request for the Netdev resource
* @rndis_ipa_ctx: main driver context
@@ -1951,9 +1963,10 @@
if (!rm_enabled(rndis_ipa_ctx))
return result;
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
return ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
-
+#endif
return ipa_rm_inactivity_timer_request_resource(
DRV_RESOURCE_ID);
@@ -1972,9 +1985,11 @@
{
if (!rm_enabled(rndis_ipa_ctx))
return;
+#ifdef CONFIG_IPA3
if (ipa_pm_is_used())
ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
else
+#endif
ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
return;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 3faf204..f19e9d6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -30,10 +30,9 @@
#include <linux/msm-bus-board.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/qcom_iommu.h>
#include <linux/time.h>
#include <linux/hashtable.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include "ipa_i.h"
#include "../ipa_rm_i.h"
@@ -364,6 +363,8 @@
static void ipa2_active_clients_log_destroy(void)
{
ipa_ctx->ipa2_active_clients_logging.log_rdy = 0;
+ kfree(active_clients_table_buf);
+ active_clients_table_buf = NULL;
kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]);
ipa_ctx->ipa2_active_clients_logging.log_head = 0;
ipa_ctx->ipa2_active_clients_logging.log_tail =
@@ -3282,7 +3283,7 @@
hfound = NULL;
memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
- hkey = arch_fast_hash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
+ hkey = jhash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
0);
hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
hentry, list, hkey) {
@@ -3731,7 +3732,8 @@
* acquire wake lock as long as suspend
* vote is held
*/
- ipa_inc_acquire_wakelock();
+ ipa_inc_acquire_wakelock(
+ IPA_WAKELOCK_REF_CLIENT_SPS);
ipa_sps_process_irq_schedule_rel();
}
mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
@@ -3804,7 +3806,7 @@
ipa_sps_process_irq_schedule_rel();
} else {
atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
- ipa_dec_release_wakelock();
+ ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS);
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
}
}
@@ -3923,6 +3925,7 @@
ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+ ipa_ctx->use_ipa_pm = resource_p->use_ipa_pm;
/* Setting up IPA RX Polling Timeout Seconds */
ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
@@ -3974,14 +3977,16 @@
ipa_ctx->ctrl->msm_bus_data_ptr);
if (!ipa_ctx->ipa_bus_hdl) {
IPAERR("fail to register with bus mgr!\n");
- result = -ENODEV;
+ result = -EPROBE_DEFER;
+ bus_scale_table = NULL;
goto fail_bus_reg;
}
} else {
IPADBG("Skipping bus scaling registration on Virtual plat\n");
}
- if (ipa2_active_clients_log_init())
+ result = ipa2_active_clients_log_init();
+ if (result)
goto fail_init_active_client;
/* get IPA clocks */
@@ -4430,11 +4435,11 @@
ipa2_active_clients_log_destroy();
fail_init_active_client:
msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl);
-fail_bus_reg:
if (bus_scale_table) {
msm_bus_cl_clear_pdata(bus_scale_table);
bus_scale_table = NULL;
}
+fail_bus_reg:
fail_bind:
kfree(ipa_ctx->ctrl);
fail_mem_ctrl:
@@ -4446,12 +4451,20 @@
return result;
}
+bool ipa_pm_is_used(void)
+{
+ return (ipa_ctx) ? ipa_ctx->use_ipa_pm : false;
+}
+
static int get_ipa_dts_configuration(struct platform_device *pdev,
struct ipa_plat_drv_res *ipa_drv_res)
{
int result;
struct resource *resource;
+ ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-ipa-pm");
+ IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
/* initialize ipa_res */
ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
@@ -4666,7 +4679,7 @@
IPADBG("sub pdev=%p\n", dev);
cb->dev = dev;
- cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+ cb->iommu = iommu_domain_alloc(&platform_bus_type);
if (!cb->iommu) {
IPAERR("could not alloc iommu domain\n");
/* assume this failure is because iommu driver is not ready */
@@ -4761,7 +4774,7 @@
IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
cb->dev = dev;
- cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
cb->va_start, cb->va_size);
if (IS_ERR_OR_NULL(cb->mapping)) {
IPADBG("Fail to create mapping\n");
@@ -4850,7 +4863,7 @@
}
cb->dev = dev;
- cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
cb->va_start,
cb->va_size);
if (IS_ERR_OR_NULL(cb->mapping)) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3cb86d0..9f71d7b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2903,10 +2903,12 @@
struct ipa_ep_context *ep;
unsigned int src_pipe;
u32 metadata;
+ u8 ucp;
status = (struct ipa_hw_pkt_status *)rx_skb->data;
src_pipe = status->endp_src_idx;
metadata = status->metadata;
+ ucp = status->ucp;
ep = &ipa_ctx->ep[src_pipe];
if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
!ep->valid ||
@@ -2930,8 +2932,10 @@
* ------------------------------------------
*/
*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+ *(u8 *)(rx_skb->cb + 4) = ucp;
IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
metadata, *(u32 *)rx_skb->cb);
+ IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 5569979..5459590 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -910,8 +910,17 @@
return -EINVAL;
}
- if (by_user)
+ if (by_user) {
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ IPADBG("Trying to delete hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ IPAERR("User cannot delete default header\n");
+ return -EPERM;
+ }
+ }
entry->user_deleted = true;
+ }
if (--entry->ref_cnt) {
IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
@@ -1234,13 +1243,18 @@
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
- if (entry->is_hdr_proc_ctx) {
- mutex_unlock(&ipa_ctx->lock);
- WARN_ON(1);
- IPAERR("default header is proc ctx\n");
- return -EFAULT;
+ IPADBG("Trying to remove hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT;
+ }
+ IPADBG("skip default header\n");
+ continue;
}
- continue;
}
if (ipa_id_find(entry->id) == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 0ed32f8..ec4942f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1206,6 +1206,7 @@
int num_ipa_cne_evt_req;
struct mutex ipa_cne_evt_lock;
bool ipa_uc_monitor_holb;
+ bool use_ipa_pm;
};
/**
@@ -1262,6 +1263,7 @@
u32 ipa_rx_polling_sleep_msec;
u32 ipa_polling_iteration;
bool ipa_uc_monitor_holb;
+ bool use_ipa_pm;
};
struct ipa_mem_partition {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index f8a0ded..4ffbd55 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -706,7 +706,9 @@
req->filter_index_list[i].filter_handle,
req->filter_index_list[i].filter_index);
return -EINVAL;
- } else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+ }
+
+ if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
req->source_pipe_index, req->install_status);
return -EINVAL;
@@ -1029,7 +1031,7 @@
qmi_indication_fin = false;
atomic_set(&workqueues_stopped, 0);
- if (atomic_read(&ipa_qmi_initialized == 0))
+ if (atomic_read(&ipa_qmi_initialized) == 0)
ipa_qmi_service_init_worker();
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index c41ddf4..227a12a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -103,7 +103,7 @@
entry->hw_len = buf - start;
} else if (entry->hw_len != (buf - start)) {
IPAERR(
- "hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
+ "hw_len differs b/w passes passed=0x%x calc=0x%lxtd\n",
entry->hw_len,
(buf - start));
return -EPERM;
@@ -197,7 +197,7 @@
if (entry->hw_len == 0) {
entry->hw_len = buf - start;
} else if (entry->hw_len != (buf - start)) {
- IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
+ IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%lxtd\n",
entry->hw_len, (buf - start));
return -EPERM;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index ffca1f5..72b2e96 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -287,14 +287,14 @@
};
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
- ipa_usecases_v1_1,
- ARRAY_SIZE(ipa_usecases_v1_1),
+ .usecase = ipa_usecases_v1_1,
+ .num_usecases = ARRAY_SIZE(ipa_usecases_v1_1),
.name = "ipa",
};
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
- ipa_usecases_v2_0,
- ARRAY_SIZE(ipa_usecases_v2_0),
+ .usecase = ipa_usecases_v2_0,
+ .num_usecases = ARRAY_SIZE(ipa_usecases_v2_0),
.name = "ipa",
};
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 7e55024..d91d7eb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -45,7 +45,7 @@
#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
#define HEADROOM_FOR_QMAP 8 /* for mux header */
#define TAILROOM 0 /* for padding by mux layer */
-#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
+#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */
#define UL_FILTER_RULE_HANDLE_START 69
#define DEFAULT_OUTSTANDING_HIGH_CTL 96
#define DEFAULT_OUTSTANDING_HIGH 64
@@ -774,7 +774,7 @@
int i;
for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
- if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
+ if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0)
return i;
}
return MAX_NUM_OF_MUX_CHANNEL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index f994db5..8e8aaef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -461,6 +461,8 @@
spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
+ kfree(active_clients_table_buf);
+ active_clients_table_buf = NULL;
kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
ipa3_ctx->ipa3_active_clients_logging.log_tail =
@@ -4974,7 +4976,8 @@
goto fail_clk;
/* init active_clients_log after getting ipa-clk */
- if (ipa3_active_clients_log_init())
+ result = ipa3_active_clients_log_init();
+ if (result)
goto fail_init_active_client;
/* Enable ipa3_ctx->enable_clock_scaling */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index ee312c7..3aaae8d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2327,6 +2327,7 @@
IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
}
+ tx_pkt = NULL;
};
return rc;
@@ -2569,10 +2570,12 @@
struct ipa3_ep_context *ep;
unsigned int src_pipe;
u32 metadata;
+ u8 ucp;
ipahal_pkt_status_parse(rx_skb->data, &status);
src_pipe = status.endp_src_idx;
metadata = status.metadata;
+ ucp = status.ucp;
ep = &ipa3_ctx->ep[src_pipe];
if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
!ep->valid ||
@@ -2595,8 +2598,10 @@
* ------------------------------------------
*/
*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+ *(u8 *)(rx_skb->cb + 4) = ucp;
IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
metadata, *(u32 *)rx_skb->cb);
+ IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index a37df7e..f885368 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -681,8 +681,17 @@
return -EINVAL;
}
- if (by_user)
+ if (by_user) {
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ IPADBG("Trying to delete hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ IPAERR("User cannot delete default header\n");
+ return -EPERM;
+ }
+ }
entry->user_deleted = true;
+ }
if (--entry->ref_cnt) {
IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
@@ -981,13 +990,18 @@
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
- if (entry->is_hdr_proc_ctx) {
- IPAERR("default header is proc ctx\n");
- mutex_unlock(&ipa3_ctx->lock);
- WARN_ON_RATELIMIT_IPA(1);
- return -EFAULT;
+ IPADBG("Trying to remove hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ if (entry->is_hdr_proc_ctx) {
+ IPAERR("default header is proc ctx\n");
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON_RATELIMIT_IPA(1);
+ return -EFAULT;
+ }
+ IPADBG("skip default header\n");
+ continue;
}
- continue;
}
if (ipa3_id_find(entry->id) == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 1c8715a..c158c94 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -610,7 +610,7 @@
IPAWANDBG("IPACM pass zero rules to Q6\n");
} else {
IPAWANDBG("IPACM pass %u rules to Q6\n",
- req->filter_spec_ex_list_len);
+ req->filter_spec_list_len);
}
if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) {
@@ -919,7 +919,9 @@
req->source_pipe_index,
req->rule_id_len);
return -EINVAL;
- } else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+ }
+
+ if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
req->source_pipe_index, req->install_status);
return -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index cee0989..98a8594 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -45,7 +45,7 @@
#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
#define HEADROOM_FOR_QMAP 8 /* for mux header */
#define TAILROOM 0 /* for padding by mux layer */
-#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
+#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */
#define UL_FILTER_RULE_HANDLE_START 69
#define DEFAULT_OUTSTANDING_HIGH 128
#define DEFAULT_OUTSTANDING_HIGH_CTL (DEFAULT_OUTSTANDING_HIGH+32)
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 0fec8ac..684aec8 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -59,6 +59,7 @@
[PM8937_SUBTYPE] = "PM8937",
[PM660L_SUBTYPE] = "PM660L",
[PM660_SUBTYPE] = "PM660",
+ [PMI632_SUBTYPE] = "PMI632",
[PMI8937_SUBTYPE] = "PMI8937",
};
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 2d5d9bf..a5cbb71 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -511,6 +511,16 @@
This driver provides support for the power supply features of
AXP20x PMIC.
+config NX30P6093
+ bool "NX30P6093 Moisture detection driver"
+ depends on I2C
+ help
+ Say Y here to enable the NX30P6093 Moisture detection peripheral.
	  The driver periodically configures the NX30P6093 HW module into
	  impedance detection mode, handles the interrupts from the NX30P6093,
	  and forces usb_psy to disable CC detection in the event of any
	  water/debris detected at USBIN.
+
source "drivers/power/supply/qcom/Kconfig"
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index cfbc992..f87bca1 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -73,3 +73,4 @@
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
+obj-$(CONFIG_NX30P6093) += nx30p6093.o
diff --git a/drivers/power/supply/nx30p6093.c b/drivers/power/supply/nx30p6093.c
new file mode 100644
index 0000000..d5c059b
--- /dev/null
+++ b/drivers/power/supply/nx30p6093.c
@@ -0,0 +1,688 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/alarmtimer.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define NX30P6093_ID_REG 0x0
+#define NX30P6093_VENDOR_ID_MASK GENMASK(7, 3)
+#define NX30P6093_VENDOR_ID_SHIFT 3
+#define NX30P6093_VERSION_ID_MASK GENMASK(2, 0)
+
+#define NX30P6093_ENABLE_REG 0x01
+#define NX30P6093_DETECT_EN BIT(6)
+
+#define NX30P6093_STATUS_REG 0x02
+#define NX30P6093_PWRON_STS BIT(7)
+#define NX30P6093_IMPEDANCE_MASK GENMASK(6, 5)
+#define NX30P6093_IMPEDANCE_SHIFT 5
+#define NX30P6093_IMPEDANCE_GOOD_VAL 1
+#define NX30P6093_IMPEDANCE_BAD_VAL 3
+
+#define NX30P6093_INTR_MASK_REG 0x04
+#define NX30P6093_OVER_TAG_STS_INTR_MASK BIT(6)
+#define NX30P6093_TMR_OUT_STS_INTR_MASK BIT(5)
+
+#define NX30P6093_VIN_ISOURCE_REG 0x06
+#define NX30P6093_VIN_ISOURCE_MASK GENMASK(3, 0)
+
+#define NX30P6093_ISOURCE_TIMING_REG 0x07
+#define NX30P6093_ISOURCE_TDET_MASK GENMASK(7, 4)
+#define NX30P6093_ISOURCE_TDET_SHIFT 4
+#define NX30P6093_ISOURCE_TDUTY_MASK GENMASK(3, 0)
+
+#define NX30P6093_VIN_VOLTAGE_TAG_REG 0x09
+#define NX30P6093_VIN_VOLTAGE_TAG_MASK GENMASK(7, 0)
+
+#define NX30P6093_SLEW_RATE_TUNE_REG 0x0f
+
+/* short duration is 5sec and long duration is 5hrs */
+#define NX30P6093_LONG_WAKEUP_SEC 18000
+#define NX30P6093_SHORT_WAKEUP_MS 5000
+
+/* Default Tduty = 5mins when always-on detection is configured */
+#define NX30P6093_ISOURCE_ALWAYS_ON_TDUTY_MS 300000
+
+/* configuration data */
+#define NX30P6093_VIN_ISOURCE_VAL 0xd
+#define NX30P6093_VIN_VOLTAGE_TAG_VAL 0xad
+#define NX30P6093_ISOURCE_TDET_VAL 0x5
+#define NX30P6093_ISOURCE_TDUTY_VAL 0x0
+#define NX30P6093_ISOURCE_TDET_MS 10
+
+struct nx30p6093_info {
+ struct device *dev;
+ struct regmap *regmap;
+ struct power_supply *usb_psy;
+ struct alarm alarm_timer;
+ struct delayed_work config_impedance_detect;
+ struct mutex lock;
+ struct dentry *debugfs;
+ u8 tduty_val;
+ int irq;
+
+ /* status data */
+ bool irq_waiting;
+ bool high_impedance;
+ bool detection_on;
+ bool always_on;
+ bool use_alarm;
+ bool suspended;
+
+ /* timer configuration */
+ u64 long_wakeup_ms;
+ u64 short_wakeup_ms;
+};
+
+static const int nx30p6093_tduty_ms[] = {0, 10, 20, 50, 100, 200, 500, 1000,
+ 2000, 3000, 6000, 12000, 30000, 60000,
+ 120000, 300000};
+
+static const struct regmap_config nx30p6093_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NX30P6093_SLEW_RATE_TUNE_REG,
+};
+
+static int nx30p6093_dump_regs(struct nx30p6093_info *info)
+{
+ unsigned int val;
+ int i, rc = 0;
+
+ for (i = 0; i <= NX30P6093_SLEW_RATE_TUNE_REG; ++i) {
+ rc = regmap_read(info->regmap, i, &val);
+ if (rc < 0)
+ return rc;
+ pr_debug("NX30P6093(0x%02x) = 0x%02x\n", i, (uint8_t)val);
+ }
+
+ return rc;
+}
+
+static inline void nx30p6093_config_alarm(struct nx30p6093_info *info,
+ u64 wakeup_ms)
+{
+ if (!info->use_alarm)
+ return;
+
+ alarm_start_relative(&info->alarm_timer, ms_to_ktime(wakeup_ms));
+}
+
+static int nx30p6093_impedance_detect(struct nx30p6093_info *info, bool enable)
+{
+ int rc = 0;
+
+ if (enable == info->detection_on)
+ return rc;
+
+ rc = regmap_update_bits(info->regmap, NX30P6093_ENABLE_REG,
+ NX30P6093_DETECT_EN,
+ enable ? NX30P6093_DETECT_EN : 0);
+ if (rc < 0) {
+ pr_err("failed to %s VIN impedance detection, rc=%d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+	/* wait 3 ms for the HW to activate and enter detection standby mode */
+ usleep_range(3000, 3100);
+
+ /* config Isource to VIN */
+ rc = regmap_update_bits(info->regmap, NX30P6093_VIN_ISOURCE_REG,
+ NX30P6093_VIN_ISOURCE_MASK,
+ enable ? NX30P6093_VIN_ISOURCE_VAL : 0);
+ if (rc < 0) {
+ pr_err("failed to configure Vin Isource register, rc=%d\n", rc);
+ return rc;
+ }
+
+ info->detection_on = enable;
+ nx30p6093_dump_regs(info);
+
+ return rc;
+}
+
+static int nx30p6093_read_impedance_status(struct nx30p6093_info *info)
+{
+ union power_supply_propval psp_val;
+ unsigned int val;
+ u8 impedance;
+ int rc;
+
+ /* Read status register */
+ rc = regmap_read(info->regmap, NX30P6093_STATUS_REG, &val);
+ if (rc < 0) {
+ pr_err("failed to read status register, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (val & NX30P6093_PWRON_STS) {
+ /* VBUS present */
+ return rc;
+ }
+
+ impedance = (val & NX30P6093_IMPEDANCE_MASK)
+ >> NX30P6093_IMPEDANCE_SHIFT;
+ if (impedance == NX30P6093_IMPEDANCE_GOOD_VAL && info->high_impedance) {
+ info->high_impedance = false;
+
+ /* enable the type-C CC detection */
+ psp_val.intval = 0;
+ rc = power_supply_set_property(info->usb_psy,
+ POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+ &psp_val);
+ } else if (impedance == NX30P6093_IMPEDANCE_BAD_VAL) {
+ info->high_impedance = true;
+
+ /* disable the type-C CC detection */
+ psp_val.intval = 1;
+ rc = power_supply_set_property(info->usb_psy,
+ POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+ &psp_val);
+ }
+
+ return rc;
+}
+
+static irqreturn_t nx30p6093_irq_handler(int irq, void *data)
+{
+ struct nx30p6093_info *info = data;
+
+ mutex_lock(&info->lock);
+
+ info->irq_waiting = true;
+ if (info->suspended) {
+ pr_debug("IRQ triggered before device-resume\n");
+ disable_irq_nosync(irq);
+ mutex_unlock(&info->lock);
+ return IRQ_HANDLED;
+ }
+ info->irq_waiting = false;
+ mutex_unlock(&info->lock);
+
+ nx30p6093_read_impedance_status(info);
+
+ if (info->high_impedance) {
+ disable_irq_nosync(irq);
+ /* set up next detection event */
+ nx30p6093_config_alarm(info,
+ NX30P6093_ISOURCE_ALWAYS_ON_TDUTY_MS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int nx30p6093_trigger_impedance_detect(struct nx30p6093_info *info)
+{
+ int rc;
+
+ if (!info->always_on) {
+ rc = nx30p6093_impedance_detect(info, true);
+ if (rc < 0) {
+ pr_err("start impedance detection failed, rc=%d\n", rc);
+ return rc;
+ }
+
+		/* wait for the detection to complete (Tdet time) */
+ usleep_range(NX30P6093_ISOURCE_TDET_MS * USEC_PER_MSEC,
+ NX30P6093_ISOURCE_TDET_MS * USEC_PER_MSEC + 100);
+ }
+
+ /* Read and process the detection result. */
+ rc = nx30p6093_read_impedance_status(info);
+ if (rc < 0)
+ pr_err("impedance status read failed, rc=%d\n", rc);
+
+ if (!info->always_on) {
+ rc = nx30p6093_impedance_detect(info, false);
+ if (rc < 0)
+ pr_err("stop impedance detection failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+static void nx30p6093_config_impedance_detect(struct work_struct *work)
+{
+ struct nx30p6093_info *info = container_of(work, struct nx30p6093_info,
+ config_impedance_detect.work);
+ u64 wakeup_ms = 0;
+
+ mutex_lock(&info->lock);
+ if (info->suspended) {
+ /*
+ * Defer the work as the device is still in suspend state and
+ * not yet resumed.
+ */
+ schedule_delayed_work(&info->config_impedance_detect,
+ msecs_to_jiffies(500));
+ mutex_unlock(&info->lock);
+ return;
+ }
+
+ nx30p6093_trigger_impedance_detect(info);
+
+ if (info->always_on) {
+ if (info->high_impedance) {
+ /*
+ * Bad impedance is not cleared yet.
+ * Set up a next detection event.
+ */
+ nx30p6093_config_alarm(info,
+ NX30P6093_ISOURCE_ALWAYS_ON_TDUTY_MS);
+ } else {
+ /* Bad impedance is cleared. Enable detection IRQ */
+ enable_irq(info->irq);
+ }
+ } else {
+ wakeup_ms = info->high_impedance ? info->short_wakeup_ms
+ : info->long_wakeup_ms;
+
+ /* Set up a next detection event */
+ nx30p6093_config_alarm(info, wakeup_ms);
+ }
+
+ mutex_unlock(&info->lock);
+ pm_relax(info->dev);
+}
+
+static enum alarmtimer_restart
+ nx30p6093_process_alarm_event(struct alarm *alarm, ktime_t now)
+{
+ struct nx30p6093_info *info = container_of(alarm, struct nx30p6093_info,
+ alarm_timer);
+ union power_supply_propval val;
+ int rc;
+
+ /* Read USB plugged-in */
+ rc = power_supply_get_property(info->usb_psy, POWER_SUPPLY_PROP_PRESENT,
+ &val);
+ if (rc < 0) {
+ pr_err("read usb present failed, rc=%d\n", rc);
+ return ALARMTIMER_RESTART;
+ }
+
+ if (val.intval) {
+ /*
+ * usb present - skip impedance detection and set up
+ * next detection event.
+ */
+ nx30p6093_config_alarm(info, info->long_wakeup_ms);
+ } else {
+ pm_stay_awake(info->dev);
+ schedule_delayed_work(&info->config_impedance_detect, 0);
+ }
+
+ return ALARMTIMER_NORESTART;
+}
+
+static int nx30p6093_init_config(struct nx30p6093_info *info)
+{
+ int rc;
+ u8 val;
+
+	/* Enable OVER_TAG_STATUS interrupt if always-on detection is enabled */
+ rc = regmap_write(info->regmap, NX30P6093_INTR_MASK_REG,
+ info->always_on ? NX30P6093_OVER_TAG_STS_INTR_MASK
+ : 0);
+ if (rc < 0) {
+ pr_err("failed to enable timer out status interrupt, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* config Isource timing (Default: 10ms) */
+ val = NX30P6093_ISOURCE_TDET_VAL << NX30P6093_ISOURCE_TDET_SHIFT;
+ rc = regmap_update_bits(info->regmap, NX30P6093_ISOURCE_TIMING_REG,
+ NX30P6093_ISOURCE_TDET_MASK, val);
+ if (rc < 0) {
+ pr_err("failed to configure Isource timing, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * config Isource Tduty timing;
+ * Default value:
+ * 1) One shot - if Periodic detection enabled
+ * 2) 5 mins - if Always-on detection enabled
+ */
+ rc = regmap_update_bits(info->regmap, NX30P6093_ISOURCE_TIMING_REG,
+ NX30P6093_ISOURCE_TDUTY_MASK,
+ info->always_on ? info->tduty_val
+ : NX30P6093_ISOURCE_TDUTY_VAL);
+ if (rc < 0) {
+ pr_err("failed to configure Isource Tduty timing, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* config VIN voltage tag (Default: 0xad) */
+ rc = regmap_update_bits(info->regmap, NX30P6093_VIN_VOLTAGE_TAG_REG,
+ NX30P6093_VIN_VOLTAGE_TAG_MASK,
+ NX30P6093_VIN_VOLTAGE_TAG_VAL);
+ if (rc < 0) {
+ pr_err("failed to configure Vin voltage tag register, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int nx30p6093_dt_init(struct i2c_client *client,
+ struct nx30p6093_info *info)
+{
+ struct device_node *of_node = client->dev.of_node;
+ int i, tduty_ms;
+ u32 long_wakeup_sec, short_wakeup_ms;
+
+ if (of_property_read_bool(of_node, "nxp,always-on-detect")) {
+ info->always_on = true;
+ tduty_ms = NX30P6093_ISOURCE_ALWAYS_ON_TDUTY_MS;
+ of_property_read_u32(of_node, "nxp,always-on-tduty-ms",
+ &tduty_ms);
+ for (i = 0; i < ARRAY_SIZE(nx30p6093_tduty_ms); i++) {
+ if (tduty_ms <= nx30p6093_tduty_ms[i]) {
+ info->tduty_val = (uint8_t)i;
+ break;
+ }
+ }
+
+ if (!info->tduty_val) {
+ pr_err("invalid nxp,always-on-tduty-ms = %d\n",
+ tduty_ms);
+ return -EINVAL;
+ }
+ } else {
+ long_wakeup_sec = NX30P6093_LONG_WAKEUP_SEC;
+ short_wakeup_ms = NX30P6093_SHORT_WAKEUP_MS;
+
+ of_property_read_u32(of_node, "nxp,long-wakeup-sec",
+ &long_wakeup_sec);
+ of_property_read_u32(of_node, "nxp,short-wakeup-ms",
+ &short_wakeup_ms);
+ if (!long_wakeup_sec || !short_wakeup_ms) {
+ pr_err("Invalid wakeup timings are configured\n");
+ return -EINVAL;
+ }
+
+ info->long_wakeup_ms = long_wakeup_sec * MSEC_PER_SEC;
+ info->short_wakeup_ms = short_wakeup_ms;
+ }
+
+ return 0;
+}
+
+static int nx30p6093_trigger_detect(struct seq_file *file, void *data)
+{
+ struct nx30p6093_info *info = file->private;
+
+ if (info->always_on)
+ return 0;
+
+ mutex_lock(&info->lock);
+ nx30p6093_trigger_impedance_detect(info);
+ seq_printf(file, "%s impedance detected\n",
+ info->high_impedance ? "BAD" : "GOOD");
+ mutex_unlock(&info->lock);
+
+ return 0;
+}
+
+static int nx30p6093_trigger_detect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nx30p6093_trigger_detect, inode->i_private);
+}
+
+static const struct file_operations nx30p6093_trigger_detect_fops = {
+ .owner = THIS_MODULE,
+ .open = nx30p6093_trigger_detect_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void nx30p6093_debugfs_init(struct nx30p6093_info *info)
+{
+ struct dentry *temp;
+
+ /* debugfs */
+ info->debugfs = debugfs_create_dir("nx30p6093", NULL);
+ if (!info->debugfs) {
+ pr_err("Couldn't create debug dir\n");
+ return;
+ }
+
+ temp = debugfs_create_file("trigger_detection", 0644, info->debugfs,
+ info, &nx30p6093_trigger_detect_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_nx30p6093_reg_addr_fops debugfs file creation failed\n");
+ debugfs_remove_recursive(info->debugfs);
+ }
+}
+
+#ifdef CONFIG_PM
+static int nx30p6093_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nx30p6093_info *info = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&info->config_impedance_detect);
+
+ mutex_lock(&info->lock);
+ info->suspended = true;
+ mutex_unlock(&info->lock);
+
+ return 0;
+}
+
+static int nx30p6093_suspend_noirq(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nx30p6093_info *info = i2c_get_clientdata(client);
+
+ if (info->irq_waiting) {
+ pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int nx30p6093_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct nx30p6093_info *info = i2c_get_clientdata(client);
+
+ mutex_lock(&info->lock);
+ info->suspended = false;
+ if (info->irq_waiting) {
+ mutex_unlock(&info->lock);
+ nx30p6093_irq_handler(client->irq, info);
+ enable_irq(client->irq);
+ } else {
+ mutex_unlock(&info->lock);
+ }
+
+ return 0;
+}
+#else
+static int nx30p6093_suspend(struct device *dev) { return 0; }
+
+static int nx30p6093_suspend_noirq(struct device *dev) { return 0; }
+
+static int nx30p6093_resume(struct device *dev) { return 0; }
+
+/* The .suspend_noirq stub was previously missing here, which broke the
+ * !CONFIG_PM build since nx30p6093_pm_ops references it unconditionally.
+ */
+#endif
+
+static const struct dev_pm_ops nx30p6093_pm_ops = {
+ .suspend = nx30p6093_suspend,
+ .suspend_noirq = nx30p6093_suspend_noirq,
+ .resume = nx30p6093_resume,
+};
+
+static int nx30p6093_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nx30p6093_info *info;
+ unsigned int val;
+ int vendor_id, version_id, rc = 0;
+
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &client->dev;
+ info->regmap = devm_regmap_init_i2c(client, &nx30p6093_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ pr_err("Error in allocating regmap, rc=%ld\n",
+ PTR_ERR(info->regmap));
+ return PTR_ERR(info->regmap);
+ }
+
+ rc = regmap_read(info->regmap, NX30P6093_ID_REG, &val);
+ if (rc < 0) {
+ pr_err("Unable to identify NX30P6093, rc=%d\n", rc);
+ return rc;
+ }
+ vendor_id = (val & NX30P6093_VENDOR_ID_MASK)
+ >> NX30P6093_VENDOR_ID_SHIFT;
+ version_id = val & NX30P6093_VERSION_ID_MASK;
+
+ info->usb_psy = power_supply_get_by_name("usb");
+ if (!info->usb_psy) {
+ pr_err("USB psy not found\n");
+ return -EPROBE_DEFER;
+ }
+
+ i2c_set_clientdata(client, info);
+ mutex_init(&info->lock);
+ INIT_DELAYED_WORK(&info->config_impedance_detect,
+ nx30p6093_config_impedance_detect);
+
+ rc = nx30p6093_dt_init(client, info);
+ if (rc < 0) {
+ pr_err("device tree parsing failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = nx30p6093_init_config(info);
+ if (rc < 0) {
+ pr_err("initial configuration programming failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (alarmtimer_get_rtcdev()) {
+ /* Initialize alarm timer */
+ info->use_alarm = true;
+ alarm_init(&info->alarm_timer, ALARM_BOOTTIME,
+ nx30p6093_process_alarm_event);
+ } else {
+ pr_err("alarm initialization failed\n");
+ return -ENODEV;
+ }
+
+ if (info->always_on) {
+ /* Moisture detect irq configuration */
+ if (client->irq) {
+ info->irq = client->irq;
+ rc = devm_request_threaded_irq(&client->dev,
+ client->irq, NULL,
+ nx30p6093_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ client->name, info);
+ if (rc < 0) {
+ pr_err("Failed Moisture detect irq(%d) request, rc=%d\n",
+ client->irq, rc);
+ return rc;
+ }
+ enable_irq_wake(client->irq);
+ } else {
+ pr_err("Moisture detect irq not defined\n");
+ return -EINVAL;
+ }
+
+ nx30p6093_impedance_detect(info, true);
+ } else {
+ /* Run impedance detection for first time */
+ pm_stay_awake(info->dev);
+ schedule_delayed_work(&info->config_impedance_detect, 0);
+ }
+
+ nx30p6093_debugfs_init(info);
+
+ if (info->always_on)
+ pr_info("NXP NX30P6093 Vendor(%d), Version(%d), configured to Always-on detection\n",
+ vendor_id, version_id);
+ else
+ pr_info("NXP NX30P6093 Vendor(%d), Version(%d), configured to periodic detection with Short_wakeup = %llu ms and Long_wakeup = %llu sec\n",
+ vendor_id, version_id, info->short_wakeup_ms,
+ info->long_wakeup_ms / MSEC_PER_SEC);
+
+ return 0;
+}
+
+static int nx30p6093_remove(struct i2c_client *client)
+{
+ struct nx30p6093_info *info = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(info->debugfs);
+ cancel_delayed_work_sync(&info->config_impedance_detect);
+
+ if (info->use_alarm)
+ alarm_cancel(&info->alarm_timer);
+
+ return 0;
+}
+
+static const struct of_device_id nx30p6093_table[] = {
+ { .compatible = "nxp,nx30p6093" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, nx30p6093_table);
+
+static const struct i2c_device_id nx30p6093_id[] = {
+ {"nx30p6093", -1},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, nx30p6093_id);
+
+static struct i2c_driver nx30p6093_driver = {
+ .driver = {
+ .name = "nxp,nx30p6093",
+ .owner = THIS_MODULE,
+ .of_match_table = nx30p6093_table,
+ .pm = &nx30p6093_pm_ops,
+ },
+ .probe = nx30p6093_probe,
+ .remove = nx30p6093_remove,
+ .id_table = nx30p6093_id,
+};
+module_i2c_driver(nx30p6093_driver);
+
+MODULE_DESCRIPTION("NX30P6093 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index ee54efc..819fbf0 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -322,6 +322,7 @@
POWER_SUPPLY_ATTR(connector_type),
POWER_SUPPLY_ATTR(parallel_batfet_mode),
POWER_SUPPLY_ATTR(min_icl),
+ POWER_SUPPLY_ATTR(moisture_detected),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index d9b5ad7..bc21b46 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -777,6 +777,12 @@
if (!request)
return 0;
+ /*
+	 * HW takes 5 cycles (200 KHz clock) to grant access after a DMA
+	 * request. Wait 40 us before polling MEM_GNT for the first time.
+ */
+ usleep_range(40, 41);
+
while (i < MEM_GNT_RETRIES) {
rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &val, 1);
if (rc < 0) {
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 8c53b2e..afa128d 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -2093,8 +2093,12 @@
return 0;
}
} else {
- /* Charging, do nothing */
- return 0;
+ if (!chip->recharge_soc_adjusted)
+ return 0;
+
+ /* Restore the default value */
+ new_recharge_soc = recharge_soc;
+ chip->recharge_soc_adjusted = false;
}
} else {
/* Restore the default value */
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 8536a61..74e80cd 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -360,6 +360,7 @@
POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
POWER_SUPPLY_PROP_CONNECTOR_TYPE,
+ POWER_SUPPLY_PROP_MOISTURE_DETECTED,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -478,6 +479,10 @@
case POWER_SUPPLY_PROP_CONNECTOR_TYPE:
val->intval = chg->connector_type;
break;
+ case POWER_SUPPLY_PROP_MOISTURE_DETECTED:
+ val->intval = get_client_vote(chg->disable_power_role_switch,
+ MOISTURE_VOTER);
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -500,7 +505,16 @@
mutex_lock(&chg->lock);
if (!chg->typec_present) {
- rc = -EINVAL;
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MOISTURE_DETECTED:
+ vote(chg->disable_power_role_switch, MOISTURE_VOTER,
+ val->intval > 0, 0);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
goto unlock;
}
@@ -1559,20 +1573,12 @@
BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
- vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
- true, 0);
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- true, 0);
vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
chip->dt.hvdcp_disable, 0);
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
true, 0);
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
true, 0);
- vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
- (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
- vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
- (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
/*
* AICL configuration:
@@ -1622,6 +1628,16 @@
return rc;
}
+ /* Connector types based votes */
+ vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+ vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+ vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+
/* configure VCONN for software control */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
diff --git a/drivers/power/supply/qcom/qpnp-typec.c b/drivers/power/supply/qcom/qpnp-typec.c
index 3c74be0..12aa16b 100644
--- a/drivers/power/supply/qcom/qpnp-typec.c
+++ b/drivers/power/supply/qcom/qpnp-typec.c
@@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -26,7 +27,6 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
-#include <linux/spmi.h>
#include <linux/usb/class-dual-role.h>
#define CREATE_MASK(NUM_BITS, POS) \
@@ -88,9 +88,10 @@
struct qpnp_typec_chip {
struct device *dev;
- struct spmi_device *spmi;
+ struct regmap *regmap;
struct power_supply *batt_psy;
- struct power_supply type_c_psy;
+ struct power_supply *typec_psy;
+ struct power_supply_desc typec_psy_desc;
struct regulator *ss_mux_vreg;
struct mutex typec_lock;
spinlock_t rw_lock;
@@ -128,29 +129,28 @@
"ufp", "dfp", "none"
};
-/* SPMI operations */
-static int __qpnp_typec_read(struct spmi_device *spmi, u8 *val, u16 addr,
+/* SPMI Read/Write operations */
+static int __qpnp_typec_read(struct qpnp_typec_chip *chip, u8 *val, u16 addr,
int count)
{
int rc;
- rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, count);
+ rc = regmap_bulk_read(chip->regmap, addr, val, count);
if (rc)
- pr_err("spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
- addr, spmi->sid, rc);
-
+ pr_err("spmi read failed addr=0x%02x rc=%d\n",
+ addr, rc);
return rc;
}
-static int __qpnp_typec_write(struct spmi_device *spmi, u8 *val, u16 addr,
+static int __qpnp_typec_write(struct qpnp_typec_chip *chip, u8 *val, u16 addr,
int count)
{
int rc;
- rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, count);
+ rc = regmap_bulk_write(chip->regmap, addr, val, count);
if (rc)
- pr_err("spmi write failed addr=0x%02x sid=0x%02x rc=%d\n",
- addr, spmi->sid, rc);
+ pr_err("spmi write failed addr=0x%02x rc=%d\n",
+ addr, rc);
return rc;
}
@@ -159,16 +159,14 @@
{
int rc;
unsigned long flags;
- struct spmi_device *spmi = chip->spmi;
if (addr == 0) {
- pr_err("addr cannot be zero addr=0x%02x sid=0x%02x\n",
- addr, spmi->sid);
+ pr_err("addr cannot be zero addr=0x%02x\n", addr);
return -EINVAL;
}
spin_lock_irqsave(&chip->rw_lock, flags);
- rc = __qpnp_typec_read(spmi, val, addr, count);
+ rc = __qpnp_typec_read(chip, val, addr, count);
spin_unlock_irqrestore(&chip->rw_lock, flags);
return rc;
@@ -180,10 +178,9 @@
u8 reg;
int rc;
unsigned long flags;
- struct spmi_device *spmi = chip->spmi;
spin_lock_irqsave(&chip->rw_lock, flags);
- rc = __qpnp_typec_read(spmi, ®, base, 1);
+ rc = __qpnp_typec_read(chip, ®, base, 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n", base, rc);
goto out;
@@ -194,7 +191,7 @@
pr_debug("addr = 0x%x writing 0x%x\n", base, reg);
- rc = __qpnp_typec_write(spmi, ®, base, 1);
+ rc = __qpnp_typec_write(chip, ®, base, 1);
if (rc) {
pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
goto out;
@@ -224,7 +221,7 @@
switch (prop) {
case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
ret.intval = chip->current_ma;
- rc = chip->batt_psy->set_property(chip->batt_psy,
+ rc = chip->batt_psy->desc->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_CURRENT_CAPABILITY, &ret);
if (rc)
pr_err("failed to set current max rc=%d\n", rc);
@@ -236,7 +233,7 @@
* charger driver.
*/
ret.intval = chip->typec_state;
- rc = chip->batt_psy->set_property(chip->batt_psy,
+ rc = chip->batt_psy->desc->set_property(chip->batt_psy,
POWER_SUPPLY_PROP_TYPEC_MODE, &ret);
if (rc)
pr_err("failed to set typec mode rc=%d\n", rc);
@@ -369,7 +366,7 @@
chip->cc_line_state = OPEN;
chip->current_ma = 0;
chip->typec_state = POWER_SUPPLY_TYPE_UNKNOWN;
- chip->type_c_psy.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ chip->typec_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
rc = set_property_on_battery(chip, POWER_SUPPLY_PROP_TYPEC_MODE);
if (rc)
pr_err("failed to set TYPEC MODE on battery psy rc=%d\n", rc);
@@ -458,7 +455,7 @@
chip->current_ma = get_max_current(reg & TYPEC_CURRENT_MASK);
/* device in UFP state */
chip->typec_state = POWER_SUPPLY_TYPE_UFP;
- chip->type_c_psy.type = POWER_SUPPLY_TYPE_UFP;
+ chip->typec_psy_desc.type = POWER_SUPPLY_TYPE_UFP;
rc = set_property_on_battery(chip, POWER_SUPPLY_PROP_TYPEC_MODE);
if (rc)
pr_err("failed to set TYPEC MODE on battery psy rc=%d\n", rc);
@@ -514,7 +511,7 @@
}
chip->typec_state = POWER_SUPPLY_TYPE_DFP;
- chip->type_c_psy.type = POWER_SUPPLY_TYPE_DFP;
+ chip->typec_psy_desc.type = POWER_SUPPLY_TYPE_DFP;
chip->current_ma = 0;
rc = set_property_on_battery(chip,
POWER_SUPPLY_PROP_TYPEC_MODE);
@@ -618,7 +615,7 @@
chip->cc_line_state = OPEN;
chip->typec_state = POWER_SUPPLY_TYPE_UNKNOWN;
- chip->type_c_psy.type = POWER_SUPPLY_TYPE_UNKNOWN;
+ chip->typec_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
if (rt_reg & DFP_DETECT_BIT) {
/* we are in DFP state*/
@@ -631,43 +628,44 @@
return 0;
}
-#define REQUEST_IRQ(chip, irq, irq_name, irq_handler, flags, wake, rc) \
-do { \
- irq = spmi_get_irq_byname(chip->spmi, NULL, irq_name); \
- if (irq < 0) { \
- pr_err("Unable to get " irq_name " irq\n"); \
- rc |= -ENXIO; \
- } \
- rc = devm_request_threaded_irq(chip->dev, \
- irq, NULL, irq_handler, flags, irq_name, \
- chip); \
- if (rc < 0) { \
- pr_err("Unable to request " irq_name " irq: %d\n", rc); \
- rc |= -ENXIO; \
- } \
- \
- if (wake) \
- enable_irq_wake(irq); \
+#define REQUEST_IRQ(chip, pdev, irq, irq_name, irq_handler, flags, wake, rc) \
+do { \
+ irq = platform_get_irq_byname(pdev, irq_name); \
+ if (irq < 0) { \
+ pr_err("Unable to get " irq_name " irq\n"); \
+ rc |= -ENXIO; \
+ } \
+ rc = devm_request_threaded_irq(chip->dev, \
+ irq, NULL, irq_handler, flags, irq_name, \
+ chip); \
+ if (rc < 0) { \
+ pr_err("Unable to request " irq_name " irq: %d\n", rc); \
+ rc |= -ENXIO; \
+ } \
+ \
+ if (wake) \
+ enable_irq_wake(irq); \
} while (0)
-static int qpnp_typec_request_irqs(struct qpnp_typec_chip *chip)
+static int qpnp_typec_request_irqs(struct qpnp_typec_chip *chip,
+ struct platform_device *pdev)
{
int rc = 0;
unsigned long flags = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
- REQUEST_IRQ(chip, chip->vrd_changed, "vrd-change", vrd_changed_handler,
+ REQUEST_IRQ(chip, pdev, chip->vrd_changed, "vrd-change",
+ vrd_changed_handler, flags, true, rc);
+ REQUEST_IRQ(chip, pdev, chip->ufp_detach, "ufp-detach",
+ ufp_detach_handler, flags, true, rc);
+ REQUEST_IRQ(chip, pdev, chip->ufp_detect, "ufp-detect",
+ ufp_detect_handler, flags, true, rc);
+ REQUEST_IRQ(chip, pdev, chip->dfp_detach, "dfp-detach",
+ dfp_detach_handler, flags, true, rc);
+ REQUEST_IRQ(chip, pdev, chip->dfp_detect, "dfp-detect",
+ dfp_detect_handler, flags, true, rc);
+ REQUEST_IRQ(chip, pdev, chip->vbus_err, "vbus-err", vbus_err_handler,
flags, true, rc);
- REQUEST_IRQ(chip, chip->ufp_detach, "ufp-detach", ufp_detach_handler,
- flags, true, rc);
- REQUEST_IRQ(chip, chip->ufp_detect, "ufp-detect", ufp_detect_handler,
- flags, true, rc);
- REQUEST_IRQ(chip, chip->dfp_detach, "dfp-detach", dfp_detach_handler,
- flags, true, rc);
- REQUEST_IRQ(chip, chip->dfp_detect, "dfp-detect", dfp_detect_handler,
- flags, true, rc);
- REQUEST_IRQ(chip, chip->vbus_err, "vbus-err", vbus_err_handler,
- flags, true, rc);
- REQUEST_IRQ(chip, chip->vconn_oc, "vconn-oc", vconn_oc_handler,
+ REQUEST_IRQ(chip, pdev, chip->vconn_oc, "vconn-oc", vconn_oc_handler,
flags, true, rc);
return rc;
@@ -682,8 +680,7 @@
enum power_supply_property prop,
union power_supply_propval *val)
{
- struct qpnp_typec_chip *chip = container_of(psy,
- struct qpnp_typec_chip, type_c_psy);
+ struct qpnp_typec_chip *chip = power_supply_get_drvdata(psy);
switch (prop) {
case POWER_SUPPLY_PROP_TYPE:
@@ -869,25 +866,32 @@
return 0;
}
-static int qpnp_typec_probe(struct spmi_device *spmi)
+static int qpnp_typec_probe(struct platform_device *pdev)
{
int rc;
- struct resource *resource;
+ unsigned int base;
struct qpnp_typec_chip *chip;
+	struct power_supply_config typec_psy_cfg = {0};
- resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
- if (!resource) {
- pr_err("Unable to get spmi resource for TYPEC\n");
- return -EINVAL;
- }
-
- chip = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_typec_chip),
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_typec_chip),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->dev = &spmi->dev;
- chip->spmi = spmi;
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+ if (rc < 0) {
+		pr_err("reg property reading failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ pr_err("Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->dev = &pdev->dev;
/* parse DT */
rc = qpnp_typec_parse_dt(chip);
@@ -896,9 +900,9 @@
return rc;
}
- chip->base = resource->start;
- dev_set_drvdata(&spmi->dev, chip);
- device_init_wakeup(&spmi->dev, 1);
+ chip->base = base;
+ dev_set_drvdata(&pdev->dev, chip);
+ device_init_wakeup(&pdev->dev, 1);
mutex_init(&chip->typec_lock);
spin_lock_init(&chip->rw_lock);
@@ -909,16 +913,18 @@
goto out;
}
- chip->type_c_psy.name = TYPEC_PSY_NAME;
- chip->type_c_psy.get_property = qpnp_typec_get_property;
- chip->type_c_psy.properties = qpnp_typec_properties;
- chip->type_c_psy.num_properties = ARRAY_SIZE(qpnp_typec_properties);
+ chip->typec_psy_desc.name = TYPEC_PSY_NAME;
+ chip->typec_psy_desc.get_property = qpnp_typec_get_property;
+ chip->typec_psy_desc.properties = qpnp_typec_properties;
+ chip->typec_psy_desc.num_properties
+ = ARRAY_SIZE(qpnp_typec_properties);
- rc = power_supply_register(chip->dev, &chip->type_c_psy);
- if (rc < 0) {
- pr_err("Unable to register type_c_psy rc=%d\n", rc);
- goto out;
- }
+ typec_psy_cfg.drv_data = chip;
+ typec_psy_cfg.of_node = NULL;
+ typec_psy_cfg.supplied_to = NULL;
+ typec_psy_cfg.num_supplicants = 0;
+ chip->typec_psy = power_supply_register(chip->dev,
+ &chip->typec_psy_desc, &typec_psy_cfg);
if (chip->role_reversal_supported) {
chip->force_mode = DUAL_ROLE_PROP_MODE_NONE;
@@ -950,7 +956,7 @@
}
/* All irqs */
- rc = qpnp_typec_request_irqs(chip);
+ rc = qpnp_typec_request_irqs(chip, pdev);
if (rc) {
pr_err("failed to request irqs rc=%d\n", rc);
goto unregister_psy;
@@ -961,7 +967,7 @@
return 0;
unregister_psy:
- power_supply_unregister(&chip->type_c_psy);
+ power_supply_unregister(chip->typec_psy);
out:
mutex_destroy(&chip->typec_lock);
if (chip->role_reversal_supported)
@@ -969,10 +975,10 @@
return rc;
}
-static int qpnp_typec_remove(struct spmi_device *spmi)
+static int qpnp_typec_remove(struct platform_device *pdev)
{
int rc;
- struct qpnp_typec_chip *chip = dev_get_drvdata(&spmi->dev);
+ struct qpnp_typec_chip *chip = dev_get_drvdata(&pdev->dev);
if (chip->role_reversal_supported) {
cancel_delayed_work_sync(&chip->role_reversal_check);
@@ -989,11 +995,13 @@
}
static const struct of_device_id qpnp_typec_match_table[] = {
- { .compatible = QPNP_TYPEC_DEV_NAME, },
- {}
+ { .compatible = QPNP_TYPEC_DEV_NAME },
+ { },
};
-static struct spmi_driver qpnp_typec_driver = {
+MODULE_DEVICE_TABLE(of, qpnp_typec_match_table);
+
+static struct platform_driver qpnp_typec_driver = {
.probe = qpnp_typec_probe,
.remove = qpnp_typec_remove,
.driver = {
@@ -1008,13 +1016,13 @@
*/
static int __init qpnp_typec_init(void)
{
- return spmi_driver_register(&qpnp_typec_driver);
+ return platform_driver_register(&qpnp_typec_driver);
}
module_init(qpnp_typec_init);
static void __exit qpnp_typec_exit(void)
{
- spmi_driver_unregister(&qpnp_typec_driver);
+ platform_driver_unregister(&qpnp_typec_driver);
}
module_exit(qpnp_typec_exit);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 1e5e136..4656e35 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1009,6 +1009,101 @@
return 0;
}
+static int smblib_micro_usb_disable_power_role_switch(struct smb_charger *chg,
+ bool disable)
+{
+ int rc = 0;
+ u8 power_role;
+
+ power_role = disable ? TYPEC_DISABLE_CMD_BIT : 0;
+ /* Disable pullup on CC1_ID pin and stop detection on CC pins */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ (uint8_t)TYPEC_POWER_ROLE_CMD_MASK,
+ power_role);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ power_role, rc);
+ return rc;
+ }
+
+ if (disable) {
+ /* configure TypeC mode */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ TYPE_C_OR_U_USB_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure typec mode rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* wait for FSM to enter idle state */
+ usleep_range(5000, 5100);
+
+ /* configure micro USB mode */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ TYPE_C_OR_U_USB_BIT,
+ TYPE_C_OR_U_USB_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't configure micro USB mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int __smblib_set_prop_typec_power_role(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 power_role;
+
+ switch (val->intval) {
+ case POWER_SUPPLY_TYPEC_PR_NONE:
+ power_role = TYPEC_DISABLE_CMD_BIT;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_DUAL:
+ power_role = 0;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_SINK:
+ power_role = UFP_EN_CMD_BIT;
+ break;
+ case POWER_SUPPLY_TYPEC_PR_SOURCE:
+ power_role = DFP_EN_CMD_BIT;
+ break;
+ default:
+ smblib_err(chg, "power role %d not supported\n", val->intval);
+ return -EINVAL;
+ }
+
+ if (power_role == UFP_EN_CMD_BIT) {
+ /* disable PBS workaround when forcing sink mode */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ } else {
+ /* restore it back to 0xA5 */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ }
+
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, power_role);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ power_role, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
/*********************
* VOTABLE CALLBACKS *
*********************/
@@ -1249,6 +1344,31 @@
return 0;
}
+static int smblib_disable_power_role_switch_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ union power_supply_propval pval;
+ int rc = 0;
+
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+ rc = smblib_micro_usb_disable_power_role_switch(chg, disable);
+ } else {
+ pval.intval = disable ? POWER_SUPPLY_TYPEC_PR_SINK
+ : POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = __smblib_set_prop_typec_power_role(chg, &pval);
+ }
+
+ if (rc)
+ smblib_err(chg, "power_role_switch = %s failed, rc=%d\n",
+ disable ? "disabled" : "enabled", rc);
+ else
+ smblib_dbg(chg, PR_MISC, "power_role_switch = %s\n",
+ disable ? "disabled" : "enabled");
+
+ return rc;
+}
+
/*******************
* VCONN REGULATOR *
* *****************/
@@ -2681,52 +2801,11 @@
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val)
{
- int rc = 0;
- u8 power_role;
+ /* Check if power role switch is disabled */
+ if (!get_effective_result(chg->disable_power_role_switch))
+ return __smblib_set_prop_typec_power_role(chg, val);
- switch (val->intval) {
- case POWER_SUPPLY_TYPEC_PR_NONE:
- power_role = TYPEC_DISABLE_CMD_BIT;
- break;
- case POWER_SUPPLY_TYPEC_PR_DUAL:
- power_role = 0;
- break;
- case POWER_SUPPLY_TYPEC_PR_SINK:
- power_role = UFP_EN_CMD_BIT;
- break;
- case POWER_SUPPLY_TYPEC_PR_SOURCE:
- power_role = DFP_EN_CMD_BIT;
- break;
- default:
- smblib_err(chg, "power role %d not supported\n", val->intval);
- return -EINVAL;
- }
-
- if (power_role == UFP_EN_CMD_BIT) {
- /* disable PBS workaround when forcing sink mode */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
- }
- } else {
- /* restore it back to 0xA5 */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
- }
- }
-
- rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- TYPEC_POWER_ROLE_CMD_MASK, power_role);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
- power_role, rc);
- return rc;
- }
-
- return rc;
+ return 0;
}
int smblib_set_prop_pd_voltage_min(struct smb_charger *chg,
@@ -4022,6 +4101,7 @@
int rc;
struct smb_irq_data *data;
struct storm_watch *wdata;
+ union power_supply_propval val;
chg->cc2_detach_wa_active = false;
@@ -4130,8 +4210,8 @@
chg->is_audio_adapter = false;
/* enable DRP */
- rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- TYPEC_POWER_ROLE_CMD_MASK, 0);
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
if (rc < 0)
smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
@@ -5017,6 +5097,16 @@
return rc;
}
+ chg->disable_power_role_switch
+ = create_votable("DISABLE_POWER_ROLE_SWITCH",
+ VOTE_SET_ANY,
+ smblib_disable_power_role_switch_callback,
+ chg);
+ if (IS_ERR(chg->disable_power_role_switch)) {
+ rc = PTR_ERR(chg->disable_power_role_switch);
+ return rc;
+ }
+
return rc;
}
@@ -5042,6 +5132,8 @@
destroy_votable(chg->hvdcp_hw_inov_dis_votable);
if (chg->typec_irq_disable_votable)
destroy_votable(chg->typec_irq_disable_votable);
+ if (chg->disable_power_role_switch)
+ destroy_votable(chg->disable_power_role_switch);
}
static void smblib_iio_deinit(struct smb_charger *chg)
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 1154b09..00a4086 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -68,6 +68,7 @@
#define OTG_VOTER "OTG_VOTER"
#define PL_FCC_LOW_VOTER "PL_FCC_LOW_VOTER"
#define WBC_VOTER "WBC_VOTER"
+#define MOISTURE_VOTER "MOISTURE_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -292,6 +293,7 @@
struct votable *hvdcp_hw_inov_dis_votable;
struct votable *usb_irq_enable_votable;
struct votable *typec_irq_disable_votable;
+ struct votable *disable_power_role_switch;
/* work */
struct work_struct bms_update_work;
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 833a8da..92f943f 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -449,7 +449,7 @@
if (of_property_read_bool(node, "qcom,stacked-batfet"))
chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
- return rc;
+ return 0;
}
/*****************************
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 3b37a37..31cf232 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -365,6 +365,16 @@
is handled by transport plug-ins that can be individually enabled and
configured separately.
+config MSM_TZ_SMMU
+ depends on ARCH_MSM8953
+ bool "Helper functions for SMMU configuration through TZ"
+ help
+ Say 'Y' here for targets that need to call into TZ to configure
+ SMMUs for any reason (for example, for errata workarounds or
+ configuration of SMMU virtualization).
+
+ If unsure, say N.
+
config MSM_GLINK_LOOPBACK_SERVER
bool "Generic Link (G-Link) Loopback Server"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index efa702f..f34b714 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -35,6 +35,7 @@
obj-$(CONFIG_MSM_SMEM) += msm_smem.o smem_debug.o
obj-$(CONFIG_MSM_SMD) += msm_smd.o smd_debug.o smd_private.o smd_init_dt.o smsm_debug.o
obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
+obj-$(CONFIG_MSM_TZ_SMMU) += msm_tz_smmu.o
obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
@@ -83,6 +84,12 @@
obj-$(CONFIG_QCOM_DCC) += dcc.o
obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
+ifdef CONFIG_MSM_RPM_SMD
+ obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_master_stat.o
+endif
+ifdef CONFIG_QTI_RPMH_API
+ obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+endif
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 457dc5f..41a1a79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -671,13 +671,14 @@
if (sscanf(buf, "%s", str) != 1)
return -EINVAL;
+ mutex_lock(&drvdata->mutex);
if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
dev_err(dev,
"Select link list to program using curr_list\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- mutex_lock(&drvdata->mutex);
if (drvdata->enable[drvdata->curr_list]) {
ret = -EBUSY;
goto out;
@@ -771,10 +772,21 @@
static ssize_t dcc_show_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int ret;
struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
+ mutex_lock(&drvdata->mutex);
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->enable[drvdata->curr_list]);
+err:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
}
static ssize_t dcc_store_enable(struct device *dev,
@@ -812,10 +824,13 @@
buf[0] = '\0';
- if (drvdata->curr_list >= DCC_MAX_LINK_LIST)
- return -EINVAL;
-
mutex_lock(&drvdata->mutex);
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ count = -EINVAL;
+ goto err;
+ }
+
list_for_each_entry(entry,
&drvdata->cfg_head[drvdata->curr_list], list) {
switch (entry->desc_type) {
@@ -852,8 +867,8 @@
count += len;
}
+err:
mutex_unlock(&drvdata->mutex);
-
return count;
}
@@ -866,6 +881,12 @@
mutex_lock(&drvdata->mutex);
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(drvdata->dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
if (!len) {
dev_err(drvdata->dev, "DCC: Invalid length\n");
ret = -EINVAL;
@@ -959,11 +980,6 @@
if (nval <= 0 || nval > 3)
return -EINVAL;
- if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
- dev_err(dev, "Select link list to program using curr_list\n");
- return -EINVAL;
- }
-
if (nval == 1) {
len = 1;
apb_bus = 0;
@@ -1028,6 +1044,12 @@
struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
mutex_lock(&drvdata->mutex);
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
if (!drvdata->enable[drvdata->curr_list]) {
ret = -EINVAL;
goto err;
@@ -1049,6 +1071,13 @@
struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
mutex_lock(&drvdata->mutex);
+
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
if (!drvdata->enable[drvdata->curr_list]) {
ret = -EINVAL;
goto err;
@@ -1159,6 +1188,12 @@
goto err;
}
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
if (list_empty(&drvdata->cfg_head[drvdata->curr_list])) {
dev_err(drvdata->dev, "DCC: No read address programmed\n");
ret = -EPERM;
@@ -1266,6 +1301,12 @@
mutex_lock(&drvdata->mutex);
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+ dev_err(dev, "Select link list to program using curr_list\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (drvdata->enable[drvdata->curr_list]) {
ret = -EBUSY;
goto out;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b315a97..d8cc2c4 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2994,7 +2994,7 @@
if (!wait_for_completion_timeout(
&ctx->int_req_ack_complete,
ctx->rx_intent_req_timeout_jiffies)) {
- GLINK_ERR_CH(ctx,
+ GLINK_ERR(
"%s: Intent request ack with size: %zu not granted for lcid\n",
__func__, size);
ret = -ETIMEDOUT;
@@ -3014,7 +3014,7 @@
if (!wait_for_completion_timeout(
&ctx->int_req_complete,
ctx->rx_intent_req_timeout_jiffies)) {
- GLINK_ERR_CH(ctx,
+ GLINK_ERR(
"%s: Intent request with size: %zu not granted for lcid\n",
__func__, size);
ret = -ETIMEDOUT;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 187c80d..9becb10 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2028,6 +2028,7 @@
/* Need enough space to write the command and some data */
if (size <= sizeof(cmd)) {
einfo->tx_resume_needed = true;
+ send_tx_blocked_signal(einfo);
spin_unlock_irqrestore(&einfo->write_lock, flags);
srcu_read_unlock(&einfo->use_ref, rcu_id);
return -EAGAIN;
@@ -2296,6 +2297,7 @@
einfo->ramp_time_us[i] = arr32[i];
rc = 0;
+ kfree(arr32);
return rc;
invalid_key:
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 0c7171a..649d0ff 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/etherdevice.h>
+#include <linux/of_gpio.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/icnss.h>
#include <soc/qcom/msm_qmi_interface.h>
@@ -461,6 +462,7 @@
atomic_t pm_count;
struct ramdump_device *msa0_dump_dev;
bool bypass_s1_smmu;
+ bool force_err_fatal;
u8 cause_for_rejuvenation;
u8 requesting_sub_system;
u16 line_number;
@@ -1176,6 +1178,48 @@
}
EXPORT_SYMBOL(icnss_power_off);
+static irqreturn_t fw_error_fatal_handler(int irq, void *ctx)
+{
+ struct icnss_priv *priv = ctx;
+
+ if (priv)
+ priv->force_err_fatal = true;
+
+ icnss_pr_err("Received force error fatal request from FW\n");
+
+ return IRQ_HANDLED;
+}
+
+static void icnss_register_force_error_fatal(struct icnss_priv *priv)
+{
+ int gpio, irq, ret;
+
+ if (!of_find_property(priv->pdev->dev.of_node,
+ "qcom,gpio-force-fatal-error", NULL)) {
+ icnss_pr_dbg("Error fatal smp2p handler not registered\n");
+ return;
+ }
+ gpio = of_get_named_gpio(priv->pdev->dev.of_node,
+ "qcom,gpio-force-fatal-error", 0);
+ if (!gpio_is_valid(gpio)) {
+ icnss_pr_err("Invalid GPIO for error fatal smp2p %d\n", gpio);
+ return;
+ }
+ irq = gpio_to_irq(gpio);
+ if (irq < 0) {
+ icnss_pr_err("Invalid IRQ for error fatal smp2p %d\n", irq);
+ return;
+ }
+ ret = request_irq(irq, fw_error_fatal_handler,
+ IRQF_TRIGGER_RISING, "wlanfw-err", priv);
+ if (ret < 0) {
+ icnss_pr_err("Unable to register for error fatal IRQ handler %d",
+ irq);
+ return;
+ }
+ icnss_pr_dbg("FW force error fatal handler registered\n");
+}
+
static int wlfw_msa_mem_info_send_sync_msg(void)
{
int ret;
@@ -2065,6 +2109,8 @@
icnss_init_vph_monitor(penv);
+ icnss_register_force_error_fatal(penv);
+
return ret;
err_setup_msa:
@@ -2363,6 +2409,9 @@
goto out;
}
+ if (priv->force_err_fatal)
+ ICNSS_ASSERT(0);
+
if (event_data->crashed)
icnss_fw_crashed(priv, event_data);
else
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index c65dfd9..87e1700 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -76,6 +76,9 @@
struct md_region ksp_entry, ktsk_entry;
u32 cpu = smp_processor_id();
+ if (is_idle_task(current))
+ return;
+
if (sp < KIMAGE_VADDR || sp > -256UL)
sp = current_stack_pointer;
diff --git a/drivers/soc/qcom/minidump_private.h b/drivers/soc/qcom/minidump_private.h
index 81ebb1c..1e61dd7 100644
--- a/drivers/soc/qcom/minidump_private.h
+++ b/drivers/soc/qcom/minidump_private.h
@@ -65,7 +65,7 @@
u32 encryption_status;
u32 encryption_required;
u32 ss_region_count;
- struct md_ss_region *md_ss_smem_regions_baseptr;
+ u64 md_ss_smem_regions_baseptr;
};
/**
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 25099bb..9a9b4df 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -571,8 +571,10 @@
mutex_lock(&devp->ch_lock);
devp->ch_state = event;
if (event == GLINK_CONNECTED) {
- if (!devp->handle)
- devp->handle = handle;
+ if (!devp->handle) {
+ GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+ goto exit;
+ }
devp->in_reset = 0;
wake_up_interruptible(&devp->ch_opened_wait_queue);
} else if (event == GLINK_REMOTE_DISCONNECTED) {
@@ -584,6 +586,7 @@
devp->handle = NULL;
wake_up_interruptible(&devp->ch_closed_wait_queue);
}
+exit:
mutex_unlock(&devp->ch_lock);
kfree(work_item);
}
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
index 3fe62f1..3644dd6 100644
--- a/drivers/soc/qcom/msm_minidump.c
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -354,7 +354,7 @@
return -ENOMEM;
md_ss_toc->md_ss_smem_regions_baseptr =
- (void *)virt_to_phys(minidump_table.md_regions);
+ virt_to_phys(minidump_table.md_regions);
/* First entry would be ELF header */
md_ss_toc->ss_region_count = 1;
diff --git a/drivers/soc/qcom/msm_tz_smmu.c b/drivers/soc/qcom/msm_tz_smmu.c
new file mode 100644
index 0000000..f608426
--- /dev/null
+++ b/drivers/soc/qcom/msm_tz_smmu.c
@@ -0,0 +1,130 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/msm_tz_smmu.h>
+
+static const char * const device_id_mappings[] = {
+ [TZ_DEVICE_VIDEO] = "VIDEO",
+ [TZ_DEVICE_MDSS] = "MDSS",
+ [TZ_DEVICE_LPASS] = "LPASS",
+ [TZ_DEVICE_MDSS_BOOT] = "MDSS_BOOT",
+ [TZ_DEVICE_USB1_HS] = "USB1_HS",
+ [TZ_DEVICE_OCMEM] = "OCMEM",
+ [TZ_DEVICE_LPASS_CORE] = "LPASS_CORE",
+ [TZ_DEVICE_VPU] = "VPU",
+ [TZ_DEVICE_COPSS_SMMU] = "COPSS_SMMU",
+ [TZ_DEVICE_USB3_0] = "USB3_0",
+ [TZ_DEVICE_USB3_1] = "USB3_1",
+ [TZ_DEVICE_PCIE_0] = "PCIE_0",
+ [TZ_DEVICE_PCIE_1] = "PCIE_1",
+ [TZ_DEVICE_BCSS] = "BCSS",
+ [TZ_DEVICE_VCAP] = "VCAP",
+ [TZ_DEVICE_PCIE20] = "PCIE20",
+ [TZ_DEVICE_IPA] = "IPA",
+ [TZ_DEVICE_APPS] = "APPS",
+ [TZ_DEVICE_GPU] = "GPU",
+ [TZ_DEVICE_UFS] = "UFS",
+ [TZ_DEVICE_ICE] = "ICE",
+ [TZ_DEVICE_ROT] = "ROT",
+ [TZ_DEVICE_VFE] = "VFE",
+ [TZ_DEVICE_ANOC0] = "ANOC0",
+ [TZ_DEVICE_ANOC1] = "ANOC1",
+ [TZ_DEVICE_ANOC2] = "ANOC2",
+ [TZ_DEVICE_CPP] = "CPP",
+ [TZ_DEVICE_JPEG] = "JPEG",
+};
+
+#define MAX_DEVICE_ID_NAME_LEN 20
+
+#define TZ_SMMU_PREPARE_ATOS_ID 0x21
+#define TZ_SMMU_ATOS_START 1
+#define TZ_SMMU_ATOS_END 0
+
+#define SMMU_CHANGE_PAGETABLE_FORMAT 0x01
+
+enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev)
+{
+ const char *device_id;
+ enum tz_smmu_device_id iter;
+
+ if (of_property_read_string(dev->of_node, "qcom,tz-device-id",
+ &device_id)) {
+ dev_err(dev, "no qcom,tz-device-id property\n");
+ return TZ_DEVICE_MAX;
+ }
+
+ for (iter = TZ_DEVICE_START; iter < TZ_DEVICE_MAX; iter++)
+ if (!strcmp(device_id_mappings[iter], device_id))
+ return iter;
+
+ return TZ_DEVICE_MAX;
+}
+
+static int __msm_tz_smmu_atos(struct device *dev, int cb_num, int operation)
+{
+ int ret;
+ struct scm_desc desc = {0};
+ enum tz_smmu_device_id devid = msm_dev_to_device_id(dev);
+
+ if (devid == TZ_DEVICE_MAX)
+ return -ENODEV;
+
+ desc.args[0] = devid;
+ desc.args[1] = cb_num;
+ desc.args[2] = operation;
+ desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, TZ_SMMU_PREPARE_ATOS_ID),
+ &desc);
+ if (ret)
+ pr_info("%s: TZ SMMU ATOS %s failed, ret = %d\n",
+ __func__,
+ operation == TZ_SMMU_ATOS_START ? "start" : "end",
+ ret);
+ return ret;
+}
+
+int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
+{
+ return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_START);
+}
+
+int msm_tz_smmu_atos_end(struct device *dev, int cb_num)
+{
+ return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_END);
+}
+
+int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx)
+{
+ struct scm_desc desc = {0};
+ int ret = 0;
+
+ desc.args[0] = sec_id;
+ desc.args[1] = cbndx;
+ desc.args[2] = 1; /* Enable */
+ desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
+ SMMU_CHANGE_PAGETABLE_FORMAT), &desc);
+
+ if (ret) {
+ WARN(1, "Format change failed for CB %d with ret %d\n",
+ cbndx, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/soc/qcom/rpm_master_stat.c b/drivers/soc/qcom/rpm_master_stat.c
new file mode 100644
index 0000000..bf4f5ec
--- /dev/null
+++ b/drivers/soc/qcom/rpm_master_stat.c
@@ -0,0 +1,510 @@
+/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+
+
+#define RPM_MASTERS_BUF_LEN 400
+
+#define SNPRINTF(buf, size, format, ...) \
+ do { \
+ if (size > 0) { \
+ int ret; \
+ ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+ if (ret > size) { \
+ buf += size; \
+ size = 0; \
+ } else { \
+ buf += ret; \
+ size -= ret; \
+ } \
+ } \
+ } while (0)
+
+#define GET_MASTER_NAME(a, prvdata) \
+ ((a >= prvdata->num_masters) ? "Invalid Master Name" : \
+ prvdata->master_names[a])
+
+#define GET_FIELD(a) ((strnstr(#a, ".", 80) + 1))
+
+struct msm_rpm_master_stats_platform_data {
+ phys_addr_t phys_addr_base;
+ u32 phys_size;
+ char **masters;
+ /*
+ * RPM maintains PC stats for each master in MSG RAM,
+ * it allocates 256 bytes for this use.
+ * No of masters differs for different targets.
+ * Based on the number of masters, linux rpm stat
+ * driver reads (32 * num_masters) bytes to display
+ * master stats.
+ */
+ s32 num_masters;
+ u32 master_offset;
+ u32 version;
+};
+
+static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
+
+struct msm_rpm_master_stats {
+ uint32_t active_cores;
+ uint32_t numshutdowns;
+ uint64_t shutdown_req;
+ uint64_t wakeup_ind;
+ uint64_t bringup_req;
+ uint64_t bringup_ack;
+ uint32_t wakeup_reason; /* 0 = rude wakeup, 1 = scheduled wakeup */
+ uint32_t last_sleep_transition_duration;
+ uint32_t last_wake_transition_duration;
+ uint32_t xo_count;
+ uint64_t xo_last_entered_at;
+ uint64_t xo_last_exited_at;
+ uint64_t xo_accumulated_duration;
+};
+
+struct msm_rpm_master_stats_private_data {
+ void __iomem *reg_base;
+ u32 len;
+ char **master_names;
+ u32 num_masters;
+ char buf[RPM_MASTERS_BUF_LEN];
+ struct msm_rpm_master_stats_platform_data *platform_data;
+};
+
+static int msm_rpm_master_stats_file_close(struct inode *inode,
+ struct file *file)
+{
+ struct msm_rpm_master_stats_private_data *private = file->private_data;
+
+ mutex_lock(&msm_rpm_master_stats_mutex);
+ if (private->reg_base)
+ iounmap(private->reg_base);
+ kfree(file->private_data);
+ mutex_unlock(&msm_rpm_master_stats_mutex);
+
+ return 0;
+}
+
+static int msm_rpm_master_copy_stats(
+ struct msm_rpm_master_stats_private_data *prvdata)
+{
+ struct msm_rpm_master_stats record;
+ struct msm_rpm_master_stats_platform_data *pdata;
+ static int master_cnt;
+ int count, j = 0;
+ char *buf;
+ unsigned long active_cores;
+
+ /* Iterate possible number of masters */
+ if (master_cnt > prvdata->num_masters - 1) {
+ master_cnt = 0;
+ return 0;
+ }
+
+ pdata = prvdata->platform_data;
+ count = RPM_MASTERS_BUF_LEN;
+ buf = prvdata->buf;
+
+ if (prvdata->platform_data->version == 2) {
+ SNPRINTF(buf, count, "%s\n",
+ GET_MASTER_NAME(master_cnt, prvdata));
+
+ record.shutdown_req = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats, shutdown_req)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.shutdown_req),
+ record.shutdown_req);
+
+ record.wakeup_ind = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats, wakeup_ind)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.wakeup_ind),
+ record.wakeup_ind);
+
+ record.bringup_req = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats, bringup_req)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.bringup_req),
+ record.bringup_req);
+
+ record.bringup_ack = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats, bringup_ack)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.bringup_ack),
+ record.bringup_ack);
+
+ record.xo_last_entered_at = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ xo_last_entered_at)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.xo_last_entered_at),
+ record.xo_last_entered_at);
+
+ record.xo_last_exited_at = readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ xo_last_exited_at)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.xo_last_exited_at),
+ record.xo_last_exited_at);
+
+ record.xo_accumulated_duration =
+ readq_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ xo_accumulated_duration)));
+
+ SNPRINTF(buf, count, "\t%s:0x%llX\n",
+ GET_FIELD(record.xo_accumulated_duration),
+ record.xo_accumulated_duration);
+
+ record.last_sleep_transition_duration =
+ readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ last_sleep_transition_duration)));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.last_sleep_transition_duration),
+ record.last_sleep_transition_duration);
+
+ record.last_wake_transition_duration =
+ readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ last_wake_transition_duration)));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.last_wake_transition_duration),
+ record.last_wake_transition_duration);
+
+ record.xo_count =
+ readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ xo_count)));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.xo_count),
+ record.xo_count);
+
+ record.wakeup_reason = readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats,
+ wakeup_reason)));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.wakeup_reason),
+ record.wakeup_reason);
+
+ record.numshutdowns = readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset +
+ offsetof(struct msm_rpm_master_stats, numshutdowns)));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.numshutdowns),
+ record.numshutdowns);
+
+ record.active_cores = readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset) +
+ offsetof(struct msm_rpm_master_stats, active_cores));
+
+ SNPRINTF(buf, count, "\t%s:0x%x\n",
+ GET_FIELD(record.active_cores),
+ record.active_cores);
+ } else {
+ SNPRINTF(buf, count, "%s\n",
+ GET_MASTER_NAME(master_cnt, prvdata));
+
+ record.numshutdowns = readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset) + 0x0);
+
+ SNPRINTF(buf, count, "\t%s:0x%0x\n",
+ GET_FIELD(record.numshutdowns),
+ record.numshutdowns);
+
+ record.active_cores = readl_relaxed(prvdata->reg_base +
+ (master_cnt * pdata->master_offset) + 0x4);
+
+ SNPRINTF(buf, count, "\t%s:0x%0x\n",
+ GET_FIELD(record.active_cores),
+ record.active_cores);
+ }
+
+ active_cores = record.active_cores;
+ j = find_first_bit(&active_cores, BITS_PER_LONG);
+ while (j < (BITS_PER_LONG - 1)) {
+ SNPRINTF(buf, count, "\t\tcore%d\n", j);
+ j = find_next_bit((const unsigned long *)&active_cores,
+ BITS_PER_LONG, j + 1);
+ }
+
+ if (j == (BITS_PER_LONG - 1))
+ SNPRINTF(buf, count, "\t\tcore%d\n", j);
+
+ master_cnt++;
+ return RPM_MASTERS_BUF_LEN - count;
+}
+
+/*
+ * debugfs read handler for the "rpm_master_stats" file.
+ *
+ * Under msm_rpm_master_stats_mutex: re-snapshots the stats into
+ * prvdata->buf via msm_rpm_master_copy_stats(), then copies them out with
+ * simple_read_from_buffer().  Returns bytes copied, or -EINVAL when the
+ * private data, platform data or user buffer is missing.
+ */
+static ssize_t msm_rpm_master_stats_file_read(struct file *file,
+ char __user *bufu, size_t count, loff_t *ppos)
+{
+ struct msm_rpm_master_stats_private_data *prvdata;
+ struct msm_rpm_master_stats_platform_data *pdata;
+ ssize_t ret;
+
+ mutex_lock(&msm_rpm_master_stats_mutex);
+ prvdata = file->private_data;
+ if (!prvdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdata = prvdata->platform_data;
+ if (!pdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!bufu || count == 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+ * NOTE(review): while *ppos is still within the stats region the
+ * buffer is regenerated and *ppos rewound -- apparently one master
+ * is emitted per read() call; confirm against the copy routine.
+ */
+ if (*ppos <= pdata->phys_size) {
+ prvdata->len = msm_rpm_master_copy_stats(prvdata);
+ *ppos = 0;
+ }
+
+ ret = simple_read_from_buffer(bufu, count, ppos,
+ prvdata->buf, prvdata->len);
+exit:
+ mutex_unlock(&msm_rpm_master_stats_mutex);
+ return ret;
+}
+
+/*
+ * debugfs open handler: allocate per-open private data and ioremap the
+ * RPM stats region described by the platform data stashed in i_private.
+ * Returns 0, -ENOMEM on allocation failure, or -EBUSY when the ioremap
+ * fails.
+ */
+static int msm_rpm_master_stats_file_open(struct inode *inode,
+ struct file *file)
+{
+ struct msm_rpm_master_stats_private_data *prvdata;
+ struct msm_rpm_master_stats_platform_data *pdata;
+ int ret = 0;
+
+ mutex_lock(&msm_rpm_master_stats_mutex);
+ pdata = inode->i_private;
+
+ file->private_data =
+ kzalloc(sizeof(struct msm_rpm_master_stats_private_data),
+ GFP_KERNEL);
+
+ if (!file->private_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ prvdata = file->private_data;
+
+ prvdata->reg_base = ioremap(pdata->phys_addr_base,
+ pdata->phys_size);
+ if (!prvdata->reg_base) {
+ kfree(file->private_data);
+ /*
+ * Clear the stale pointer itself: the previous code only
+ * NULLed the local copy, leaving file->private_data dangling
+ * after the kfree() above.
+ */
+ file->private_data = NULL;
+ pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+ __func__, &pdata->phys_addr_base,
+ pdata->phys_size);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ prvdata->len = 0;
+ prvdata->num_masters = pdata->num_masters;
+ prvdata->master_names = pdata->masters;
+ prvdata->platform_data = pdata;
+exit:
+ mutex_unlock(&msm_rpm_master_stats_mutex);
+ return ret;
+}
+
+/*
+ * fops for the "rpm_master_stats" debugfs node; the .release handler is
+ * defined earlier in this file (outside this hunk).
+ */
+static const struct file_operations msm_rpm_master_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_rpm_master_stats_file_open,
+ .read = msm_rpm_master_stats_file_read,
+ .release = msm_rpm_master_stats_file_close,
+ .llseek = no_llseek,
+};
+
+/*
+ * Build platform data from the device tree: reads
+ * "qcom,master-stats-version", "qcom,master-offset" and the
+ * "qcom,masters" string list.  All allocations are devm-managed, so the
+ * error path simply returns NULL and the device core cleans up.
+ */
+static struct msm_rpm_master_stats_platform_data
+ *msm_rpm_master_populate_pdata(struct device *dev)
+{
+ struct msm_rpm_master_stats_platform_data *pdata;
+ struct device_node *node = dev->of_node;
+ int rc = 0, i;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto err;
+
+ rc = of_property_read_u32(node, "qcom,master-stats-version",
+ &pdata->version);
+ if (rc) {
+ dev_err(dev, "master-stats-version missing rc=%d\n", rc);
+ goto err;
+ }
+
+ rc = of_property_read_u32(node, "qcom,master-offset",
+ &pdata->master_offset);
+ if (rc) {
+ dev_err(dev, "master-offset missing rc=%d\n", rc);
+ goto err;
+ }
+
+ pdata->num_masters = of_property_count_strings(node, "qcom,masters");
+ if (pdata->num_masters < 0) {
+ dev_err(dev, "Failed to get number of masters =%d\n",
+ pdata->num_masters);
+ goto err;
+ }
+
+ pdata->masters = devm_kzalloc(dev, sizeof(char *) * pdata->num_masters,
+ GFP_KERNEL);
+ if (!pdata->masters)
+ goto err;
+
+ /*
+ * Read master names from DT
+ */
+ for (i = 0; i < pdata->num_masters; i++) {
+ const char *master_name;
+
+ /*
+ * Check the return value: previously a failed read left
+ * master_name uninitialized and it was then passed to
+ * strlen()/strlcpy().
+ */
+ rc = of_property_read_string_index(node, "qcom,masters",
+ i, &master_name);
+ if (rc) {
+ dev_err(dev, "Failed to read master %d rc=%d\n", i, rc);
+ goto err;
+ }
+ pdata->masters[i] = devm_kzalloc(dev, sizeof(char) *
+ strlen(master_name) + 1, GFP_KERNEL);
+ if (!pdata->masters[i])
+ goto err;
+ strlcpy(pdata->masters[i], master_name,
+ strlen(master_name) + 1);
+ }
+ return pdata;
+err:
+ return NULL;
+}
+
+/*
+ * Probe: build (or take) platform data, record the stats IO region from
+ * the first MEM resource, and expose "rpm_master_stats" at the debugfs
+ * root (parent NULL).  The pdata pointer is handed to the file via
+ * i_private; the dentry is kept as drvdata for remove().
+ */
+static int msm_rpm_master_stats_probe(struct platform_device *pdev)
+{
+ struct dentry *dent;
+ struct msm_rpm_master_stats_platform_data *pdata;
+ struct resource *res = NULL;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (pdev->dev.of_node)
+ pdata = msm_rpm_master_populate_pdata(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: Unable to get pdata\n", __func__);
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev,
+ "%s: Failed to get IO resource from platform device",
+ __func__);
+ return -ENXIO;
+ }
+
+ pdata->phys_addr_base = res->start;
+ pdata->phys_size = resource_size(res);
+
+ dent = debugfs_create_file("rpm_master_stats", 0444, NULL,
+ pdata, &msm_rpm_master_stats_fops);
+
+ if (!dent) {
+ dev_err(&pdev->dev, "%s: ERROR debugfs_create_file failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dent);
+ return 0;
+}
+
+/* Remove: tear down the debugfs file created in probe(). */
+static int msm_rpm_master_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *dent;
+
+ dent = platform_get_drvdata(pdev);
+ debugfs_remove(dent);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+/* Matches the "qcom,rpm-master-stats" DT node documented in bindings. */
+static const struct of_device_id rpm_master_table[] = {
+ {.compatible = "qcom,rpm-master-stats"},
+ {},
+};
+
+static struct platform_driver msm_rpm_master_stats_driver = {
+ .probe = msm_rpm_master_stats_probe,
+ .remove = msm_rpm_master_stats_remove,
+ .driver = {
+ .name = "msm_rpm_master_stats",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_master_table,
+ },
+};
+
+static int __init msm_rpm_master_stats_init(void)
+{
+ return platform_driver_register(&msm_rpm_master_stats_driver);
+}
+
+static void __exit msm_rpm_master_stats_exit(void)
+{
+ platform_driver_unregister(&msm_rpm_master_stats_driver);
+}
+
+module_init(msm_rpm_master_stats_init);
+module_exit(msm_rpm_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Master Statistics driver");
+MODULE_ALIAS("platform:msm_master_stat_log");
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index e60a7ad..a39856b 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -53,6 +53,11 @@
u64 last_entered_at;
u64 last_exited_at;
u64 accumulated;
+#if defined(CONFIG_MSM_RPM_SMD)
+ u32 client_votes;
+ u32 reserved[3];
+#endif
+
};
struct msm_rpmstats_kobj_attr {
@@ -92,11 +97,21 @@
time_since_last_mode = get_time_in_sec(time_since_last_mode);
actual_last_sleep = get_time_in_msec(data->accumulated);
+#if defined(CONFIG_MSM_RPM_SMD)
+ return snprintf(buf, buflength,
+ "RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+ "time since last mode(sec):%llu\nactual last sleep(msec):%llu\n"
+ "client votes: %#010x\n\n",
+ stat_type, data->count, time_in_last_mode,
+ time_since_last_mode, actual_last_sleep,
+ data->client_votes);
+#else
return snprintf(buf, buflength,
"RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
"time since last mode(sec):%llu\nactual last sleep(msec):%llu\n\n",
stat_type, data->count, time_in_last_mode,
time_since_last_mode, actual_last_sleep);
+#endif
}
static inline u32 msm_rpmstats_read_long_register(void __iomem *regbase,
@@ -141,6 +156,12 @@
data.accumulated = msm_rpmstats_read_quad_register(reg,
i, offsetof(struct msm_rpm_stats_data,
accumulated));
+#if defined(CONFIG_MSM_RPM_SMD)
+ data.client_votes = msm_rpmstats_read_long_register(reg,
+ i, offsetof(struct msm_rpm_stats_data,
+ client_votes));
+#endif
+
length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
&data, sizeof(prvdata->buf) - length);
prvdata->read_idx++;
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
new file mode 100644
index 0000000..2c379a0
--- /dev/null
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/smem.h>
+
+/* SMEM item IDs per master; values run consecutively from MPSS = 605. */
+enum master_smem_id {
+ MPSS = 605,
+ ADSP,
+ CDSP,
+ SLPI,
+ GPU,
+ DISPLAY,
+};
+
+/*
+ * SMEM host (processor) IDs.  GPU and DISPLAY stats are published by the
+ * APSS, hence they alias PID_APSS.
+ */
+enum master_pid {
+ PID_APSS = 0,
+ PID_MPSS = 1,
+ PID_ADSP = 2,
+ PID_SLPI = 3,
+ PID_CDSP = 5,
+ PID_GPU = PID_APSS,
+ PID_DISPLAY = PID_APSS,
+};
+
+/* Display name plus the SMEM (item, host) pair to read stats from. */
+struct msm_rpmh_master_data {
+ char *master_name;
+ enum master_smem_id smem_id;
+ enum master_pid pid;
+};
+
+static const struct msm_rpmh_master_data rpmh_masters[] = {
+ {"MPSS", MPSS, PID_MPSS},
+ {"ADSP", ADSP, PID_ADSP},
+ {"CDSP", CDSP, PID_CDSP},
+ {"SLPI", SLPI, PID_SLPI},
+ {"GPU", GPU, PID_GPU},
+ {"DISPLAY", DISPLAY, PID_DISPLAY},
+};
+
+/* Layout of one master's stats record in SMEM (written by the master). */
+struct msm_rpmh_master_stats {
+ uint32_t version_id;
+ uint32_t counts;
+ uint64_t last_entered_at;
+ uint64_t last_exited_at;
+ uint64_t accumulated_duration;
+};
+
+/* Per-device bookkeeping: the sysfs kobject and its attribute. */
+struct rpmh_master_stats_prv_data {
+ struct kobj_attribute ka;
+ struct kobject *kobj;
+};
+
+/* Serializes concurrent sysfs reads of the SMEM stats. */
+static DEFINE_MUTEX(rpmh_stats_mutex);
+
+/*
+ * Format one master's sleep stats into prvbuf (at most length bytes).
+ *
+ * Use scnprintf() rather than snprintf(): the caller accumulates the
+ * return value into its buffer offset, and snprintf()'s "would-have-been"
+ * length on truncation could push that offset past the end of the
+ * PAGE_SIZE sysfs buffer.  scnprintf() returns bytes actually written.
+ */
+static ssize_t msm_rpmh_master_stats_print_data(char *prvbuf, ssize_t length,
+ struct msm_rpmh_master_stats *record,
+ const char *name)
+{
+ return scnprintf(prvbuf, length, "%s\n\tVersion:0x%x\n"
+ "\tSleep Count:0x%x\n"
+ "\tSleep Last Entered At:0x%llx\n"
+ "\tSleep Last Exited At:0x%llx\n"
+ "\tSleep Accumulated Duration:0x%llx\n\n",
+ name, record->version_id, record->counts,
+ record->last_entered_at, record->last_exited_at,
+ record->accumulated_duration);
+}
+
+/*
+ * sysfs show handler: walk the rpmh_masters table, fetch each master's
+ * record from SMEM and append its formatted stats to buf.  Masters whose
+ * SMEM entry is absent are silently skipped.
+ */
+static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t length;
+ int i = 0;
+ unsigned int size = 0;
+ struct msm_rpmh_master_stats *record = NULL;
+
+ /*
+ * Read SMEM data written by masters
+ */
+
+ mutex_lock(&rpmh_stats_mutex);
+
+ for (i = 0, length = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
+ record = (struct msm_rpmh_master_stats *) smem_get_entry(
+ rpmh_masters[i].smem_id, &size,
+ rpmh_masters[i].pid, 0);
+ /*
+ * NOTE(review): PAGE_SIZE - length mixes unsigned and
+ * signed; the guard only works if length never exceeds
+ * PAGE_SIZE, i.e. the print helper must not over-report
+ * what it wrote -- verify.
+ */
+ if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
+ length += msm_rpmh_master_stats_print_data(
+ buf + length, PAGE_SIZE - length,
+ record,
+ rpmh_masters[i].master_name);
+ }
+
+ mutex_unlock(&rpmh_stats_mutex);
+
+ return length;
+}
+
+/*
+ * Probe: create /sys/power/rpmh_stats/master_stats (read-only) backed by
+ * msm_rpmh_master_stats_show().  Note the success path also exits through
+ * the "fail" label -- ret is 0 there, so this is correct if mislabeled.
+ */
+static int msm_rpmh_master_stats_probe(struct platform_device *pdev)
+{
+ struct rpmh_master_stats_prv_data *prvdata = NULL;
+ struct kobject *rpmh_master_stats_kobj = NULL;
+ int ret = 0;
+
+ if (!pdev)
+ return -EINVAL;
+
+ prvdata = kzalloc(sizeof(struct rpmh_master_stats_prv_data),
+ GFP_KERNEL);
+ if (!prvdata) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ rpmh_master_stats_kobj = kobject_create_and_add(
+ "rpmh_stats",
+ power_kobj);
+ if (!rpmh_master_stats_kobj) {
+ ret = -ENOMEM;
+ kfree(prvdata);
+ goto fail;
+ }
+
+ prvdata->kobj = rpmh_master_stats_kobj;
+
+ sysfs_attr_init(&prvdata->ka.attr);
+ prvdata->ka.attr.mode = 0444;
+ prvdata->ka.attr.name = "master_stats";
+ prvdata->ka.show = msm_rpmh_master_stats_show;
+ prvdata->ka.store = NULL;
+
+ ret = sysfs_create_file(prvdata->kobj, &prvdata->ka.attr);
+ if (ret) {
+ pr_err("sysfs_create_file failed\n");
+ kobject_put(prvdata->kobj);
+ kfree(prvdata);
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, prvdata);
+
+fail:
+ return ret;
+}
+
+/* Remove: undo probe() -- drop the sysfs file, kobject and prvdata. */
+static int msm_rpmh_master_stats_remove(struct platform_device *pdev)
+{
+ struct rpmh_master_stats_prv_data *prvdata;
+
+ if (!pdev)
+ return -EINVAL;
+
+ prvdata = (struct rpmh_master_stats_prv_data *)
+ platform_get_drvdata(pdev);
+
+ sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
+ kobject_put(prvdata->kobj);
+ kfree(prvdata);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/* Matches the "qcom,rpmh-master-stats" DT node documented in bindings. */
+static const struct of_device_id rpmh_master_table[] = {
+ {.compatible = "qcom,rpmh-master-stats"},
+ {},
+};
+
+static struct platform_driver msm_rpmh_master_stats_driver = {
+ .probe = msm_rpmh_master_stats_probe,
+ .remove = msm_rpmh_master_stats_remove,
+ .driver = {
+ .name = "msm_rpmh_master_stats",
+ .owner = THIS_MODULE,
+ .of_match_table = rpmh_master_table,
+ },
+};
+
+static int __init msm_rpmh_master_stats_init(void)
+{
+ return platform_driver_register(&msm_rpmh_master_stats_driver);
+}
+
+static void __exit msm_rpmh_master_stats_exit(void)
+{
+ platform_driver_unregister(&msm_rpmh_master_stats_driver);
+}
+
+module_init(msm_rpmh_master_stats_init);
+module_exit(msm_rpmh_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPMH Master Statistics driver");
+MODULE_ALIAS("platform:msm_rpmh_master_stat_log");
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 5289cd0..1c8bc51 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -25,12 +25,28 @@
DEFINE_MUTEX(secure_buffer_mutex);
+/*
+ * Legacy (pre-ARMv8 SCM) chunk-list descriptor; packed to match the TZ
+ * call ABI exactly -- do not reorder fields.
+ */
+struct cp2_mem_chunks {
+ u32 chunk_list;
+ u32 chunk_list_size;
+ u32 chunk_size;
+} __attribute__ ((__packed__));
+
+/* Full lock/unlock request passed to the secure world by value. */
+struct cp2_lock_req {
+ struct cp2_mem_chunks chunks;
+ u32 mem_usage;
+ u32 lock;
+} __attribute__ ((__packed__));
+
struct mem_prot_info {
phys_addr_t addr;
u64 size;
};
#define MEM_PROT_ASSIGN_ID 0x16
+#define MEM_PROTECT_LOCK_ID2 0x0A
+#define MEM_PROTECT_LOCK_ID2_FLAT 0x11
+#define V2_CHUNK_SIZE SZ_1M
+#define FEATURE_ID_CP 12
struct dest_vm_and_perm_info {
u32 vm;
@@ -42,6 +58,134 @@
static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)
+/*
+ * Ask TZ to lock or unlock a physically-described chunk list.  Uses the
+ * legacy scm_call() on pre-ARMv8 TZ and scm_call2() otherwise; both
+ * variants carry the same payload.
+ */
+static int secure_buffer_change_chunk(u32 chunks,
+ u32 nchunks,
+ u32 chunk_size,
+ int lock)
+{
+ struct cp2_lock_req request;
+ u32 resp;
+ int ret;
+ struct scm_desc desc = {0};
+
+ desc.args[0] = request.chunks.chunk_list = chunks;
+ desc.args[1] = request.chunks.chunk_list_size = nchunks;
+ desc.args[2] = request.chunks.chunk_size = chunk_size;
+ /* Usage is now always 0 */
+ desc.args[3] = request.mem_usage = 0;
+ desc.args[4] = request.lock = lock;
+ desc.args[5] = 0;
+ desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+ SCM_VAL);
+
+ /* Drop stale kernel mappings before handing pages to TZ. */
+ kmap_flush_unused();
+ kmap_atomic_flush_unused();
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+ &request, sizeof(request), &resp, sizeof(resp));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ MEM_PROTECT_LOCK_ID2_FLAT), &desc);
+ resp = desc.ret[0];
+ }
+
+ /* NOTE(review): resp is captured but never checked; only the scm
+ * call's return code is propagated -- confirm that is intentional.
+ */
+ return ret;
+}
+
+/*
+ * Lock or unlock every segment of an sg_table with TZ.  Each segment must
+ * be a multiple of V2_CHUNK_SIZE; its physical range is described to TZ
+ * as an array of chunk base addresses.
+ *
+ * NOTE(review): ret only reflects the *last* segment's status, and a
+ * failure mid-way leaves earlier segments already (un)locked -- confirm
+ * callers tolerate this partial-failure behavior.
+ */
+static int secure_buffer_change_table(struct sg_table *table, int lock)
+{
+ int i, j;
+ int ret = -EINVAL;
+ u32 *chunk_list;
+ struct scatterlist *sg;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int nchunks;
+ int size = sg->length;
+ int chunk_list_len;
+ phys_addr_t chunk_list_phys;
+
+ /*
+ * This should theoretically be a phys_addr_t but the protocol
+ * indicates this should be a u32.
+ */
+ u32 base;
+ u64 tmp = sg_dma_address(sg);
+
+ WARN((tmp >> 32) & 0xffffffff,
+ "%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
+ __func__, sg, tmp);
+ if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
+ WARN(1,
+ "%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
+ __func__, i, size, V2_CHUNK_SIZE);
+ return -EINVAL;
+ }
+
+ base = (u32)tmp;
+
+ nchunks = size / V2_CHUNK_SIZE;
+ chunk_list_len = sizeof(u32)*nchunks;
+
+ chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
+
+ if (!chunk_list)
+ return -ENOMEM;
+
+ chunk_list_phys = virt_to_phys(chunk_list);
+ for (j = 0; j < nchunks; j++)
+ chunk_list[j] = base + j * V2_CHUNK_SIZE;
+
+ /*
+ * Flush the chunk list before sending the memory to the
+ * secure environment to ensure the data is actually present
+ * in RAM
+ *
+ * NOTE(review): chunk_list is u32 *, so adding the byte
+ * count chunk_list_len advances 4x the list size -- an
+ * over-flush; harmless at worst, but confirm intent.
+ */
+ dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
+
+ ret = secure_buffer_change_chunk(chunk_list_phys,
+ nchunks, V2_CHUNK_SIZE, lock);
+
+ if (!ret) {
+ /*
+ * Set or clear the private page flag to communicate the
+ * status of the chunk to other entities
+ */
+ if (lock)
+ SetPagePrivate(sg_page(sg));
+ else
+ ClearPagePrivate(sg_page(sg));
+ }
+
+ kfree(chunk_list);
+ }
+
+ return ret;
+}
+
+/* Public entry: lock (assign to secure world) an sg_table, serialized. */
+int msm_secure_table(struct sg_table *table)
+{
+ int ret;
+
+ mutex_lock(&secure_buffer_mutex);
+ ret = secure_buffer_change_table(table, 1);
+ mutex_unlock(&secure_buffer_mutex);
+
+ return ret;
+}
+
+/* Public entry: unlock (return to HLOS) an sg_table, serialized. */
+int msm_unsecure_table(struct sg_table *table)
+{
+ int ret;
+
+ mutex_lock(&secure_buffer_mutex);
+ ret = secure_buffer_change_table(table, 0);
+ mutex_unlock(&secure_buffer_mutex);
+
+ return ret;
+}
+
static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
size_t *size_in_bytes)
@@ -279,6 +423,19 @@
}
}
+/* Pack a content-protection version as 10.10.12 bits for comparison. */
+#define MAKE_CP_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+bool msm_secure_v2_is_supported(void)
+{
+ /*
+ * if the version is < 1.1.0 then dynamic buffer allocation is
+ * not supported
+ */
+ return (scm_get_feat_version(FEATURE_ID_CP) >=
+ MAKE_CP_VERSION(1, 1, 0));
+}
+
static int __init alloc_secure_shared_memory(void)
{
int ret = 0;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 876e176..9ff0c73 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -840,6 +840,8 @@
ch->glink_state = GLINK_LOCAL_DISCONNECTED;
ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
ch->pid = 0;
+ ch->actual_rx_size = 0;
+ ch->glink_rx_buf = NULL;
pr_debug("Channel closed [%s].\n", ch->name);
@@ -940,8 +942,8 @@
/* check for already pending data */
if (ch->actual_rx_size) {
- pr_debug("already pending data size [%zu]\n",
- ch->actual_rx_size);
+ pr_debug("already pending data size [%zu] ch [%s]\n",
+ ch->actual_rx_size, ch->name);
goto copy_buf;
}
@@ -949,24 +951,27 @@
reinit_completion(&ch->rx_done);
/* Wait for Rx response */
- pr_debug("Wait for Rx done.\n");
+ pr_debug("Wait for Rx done, ch [%s].\n", ch->name);
if (timeout_msec)
timeleft = wait_for_completion_timeout(&ch->rx_done, jiffies);
else
wait_for_completion(&ch->rx_done);
if (timeleft == 0) {
- pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
+ pr_err("rx_done timeout [%d] msec expired, ch [%s]\n",
+ timeout_msec, ch->name);
mutex_unlock(&ch->lock);
return -ETIMEDOUT;
} else if (ch->rx_abort) {
mutex_unlock(&ch->lock);
- pr_err("rx_abort, probably remote side reset (SSR).\n");
+ pr_err("rx_abort, probably remote side reset (SSR), ch [%s].\n",
+ ch->name);
return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
- pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
+ pr_debug("actual_rx_size is [%zu], ch [%s]\n",
+ ch->actual_rx_size, ch->name);
} else {
- pr_err("actual_rx_size is zero.\n");
+ pr_err("actual_rx_size is zero, ch [%s].\n", ch->name);
goto exit_err;
}
@@ -980,7 +985,7 @@
size = min_t(int, ch->actual_rx_size, size);
memcpy(buf, ch->glink_rx_buf, size);
- pr_debug("copy size [%d].\n", (int) size);
+ pr_debug("copy size [%d] , ch [%s].\n", (int) size, ch->name);
/* free glink buffer after copy to spcom buffer */
glink_rx_done(ch->glink_handle, ch->glink_rx_buf, false);
@@ -993,7 +998,8 @@
pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
goto exit_err;
} else {
- pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
+ pr_debug("queue rx_buf, size [%zu], ch [%s]\n",
+ ch->rx_buf_size, ch->name);
}
mutex_unlock(&ch->lock);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index d3819b6..4b686e6 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1065,7 +1065,8 @@
{
struct pil_tz_data *d;
struct resource *res;
- u32 proxy_timeout;
+ struct device_node *crypto_node;
+ u32 proxy_timeout, crypto_id;
int len, rc;
d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
@@ -1128,7 +1129,17 @@
rc);
return rc;
}
- scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE_0);
+
+ crypto_id = MSM_BUS_MASTER_CRYPTO_CORE_0;
+ crypto_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,mas-crypto", 0);
+ if (!IS_ERR_OR_NULL(crypto_node)) {
+ of_property_read_u32(crypto_node, "cell-id",
+ &crypto_id);
+ of_node_put(crypto_node);
+ }
+
+ scm_pas_init((int)crypto_id);
}
rc = pil_desc_init(&d->desc);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d6089aa..72dfb3d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -726,6 +726,24 @@
return 0;
}
+/*
+ * Reject request_irq() for interrupts whose peripheral is owned by a
+ * different execution environment.  This check was moved here from the
+ * irq-domain xlate path (see the hunk below that deletes it); sid/periph/
+ * irq are decoded only for the diagnostic message.
+ */
+static int qpnpint_irq_request_resources(struct irq_data *d)
+{
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u16 periph = HWIRQ_PER(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
+ u16 sid = HWIRQ_SID(d->hwirq);
+ u16 irq = HWIRQ_IRQ(d->hwirq);
+
+ if (pmic_arb->apid_data[apid].irq_owner != pmic_arb->ee) {
+ dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
+ sid, periph, irq, pmic_arb->ee,
+ pmic_arb->apid_data[apid].irq_owner);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static struct irq_chip pmic_arb_irqchip = {
.name = "pmic_arb",
.irq_ack = qpnpint_irq_ack,
@@ -733,6 +751,7 @@
.irq_unmask = qpnpint_irq_unmask,
.irq_set_type = qpnpint_irq_set_type,
.irq_get_irqchip_state = qpnpint_get_irqchip_state,
+ .irq_request_resources = qpnpint_irq_request_resources,
.flags = IRQCHIP_MASK_ON_SUSPEND
| IRQCHIP_SKIP_SET_WAKE,
};
@@ -779,13 +798,6 @@
return rc;
}
- if (pa->apid_data[apid].irq_owner != pa->ee) {
- dev_err(&pa->spmic->dev, "failed to xlate sid = 0x%x, periph = 0x%x, irq = %u: ee=%u but owner=%u\n",
- intspec[0], intspec[1], intspec[2], pa->ee,
- pa->apid_data[apid].irq_owner);
- return -ENODEV;
- }
-
/* Keep track of {max,min}_apid for bounding search during interrupt */
if (apid > pa->max_apid)
pa->max_apid = apid;
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 309b9cc..f3c8d51 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
- ion_system_secure_heap.o
+ ion_carveout_heap.o ion_chunk_heap.o ion_system_secure_heap.o
+ifdef CONFIG_ION_MSM
+obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
+endif
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION) += compat_ion.o
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
new file mode 100644
index 0000000..b2eac28
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -0,0 +1,902 @@
+/*
+ * drivers/staging/android/ion/ion_cma_secure_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
+#include <soc/qcom/secure_buffer.h>
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED NULL
+
+/* One physically-contiguous piece of a non-contiguous secure allocation. */
+struct ion_secure_cma_non_contig_info {
+ dma_addr_t phys;
+ int len;
+ struct list_head entry;
+};
+
+/* Per-buffer bookkeeping attached to an ion buffer's priv pointer. */
+struct ion_secure_cma_buffer_info {
+ dma_addr_t phys;
+ struct sg_table *table;
+ bool is_cached;
+ int len;
+ struct list_head non_contig_list;
+ unsigned long ncelems;
+};
+
+/* One dma_alloc_attrs() region pooled by the heap; cnt tracks bytes in use. */
+struct ion_cma_alloc_chunk {
+ void *cpu_addr;
+ struct list_head entry;
+ dma_addr_t handle;
+ unsigned long chunk_size;
+ atomic_t cnt;
+};
+
+struct ion_cma_secure_heap {
+ struct device *dev;
+ /*
+ * Protects against races between threads allocating memory/adding to
+ * pool at the same time. (e.g. thread 1 adds to pool, thread 2
+ * allocates thread 1's memory before thread 1 knows it needs to
+ * allocate more.
+ * Admittedly this is fairly coarse grained right now but the chance for
+ * contention on this lock is unlikely right now. This can be changed if
+ * this ever changes in the future
+ */
+ struct mutex alloc_lock;
+ /*
+ * protects the list of memory chunks in this pool
+ */
+ struct mutex chunk_lock;
+ struct ion_heap heap;
+ /*
+ * Bitmap for allocation. This contains the aggregate of all chunks.
+ */
+ unsigned long *bitmap;
+ /*
+ * List of all allocated chunks
+ *
+ * This is where things get 'clever'. Individual allocations from
+ * dma_alloc_coherent must be allocated and freed in one chunk.
+ * We don't just want to limit the allocations to those confined
+ * within a single chunk (if clients allocate n small chunks we would
+ * never be able to use the combined size). The bitmap allocator is
+ * used to find the contiguous region and the parts of the chunks are
+ * marked off as used. The chunks won't be freed in the shrinker until
+ * the usage is actually zero.
+ */
+ struct list_head chunks;
+ int npages;
+ ion_phys_addr_t base;
+ struct work_struct work;
+ unsigned long last_alloc;
+ struct shrinker shrinker;
+ atomic_t total_allocated;
+ atomic_t total_pool_size;
+ atomic_t total_leaked;
+ unsigned long heap_size;
+ unsigned long default_prefetch_size;
+};
+
+static void ion_secure_pool_pages(struct work_struct *work);
+
+/*
+ * Create a scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ dma_addr_t handle, size_t size)
+{
+ /* Single-entry table: the region at handle is physically contiguous. */
+ struct page *page = pfn_to_page(PFN_DOWN(handle));
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ sg_dma_address(sgt->sgl) = handle;
+ return 0;
+}
+
+/*
+ * Grow the pool by one dma_alloc_attrs() chunk of len bytes and mark its
+ * pages free in the heap bitmap.  Called directly on allocation misses
+ * and from the prefetch worker (prefetch only affects tracing here).
+ */
+static int ion_secure_cma_add_to_pool(
+ struct ion_cma_secure_heap *sheap,
+ unsigned long len,
+ bool prefetch)
+{
+ void *cpu_addr;
+ dma_addr_t handle;
+ unsigned long attrs = 0;
+ int ret = 0;
+ struct ion_cma_alloc_chunk *chunk;
+
+ trace_ion_secure_cma_add_to_pool_start(len, atomic_read(&sheap->
+ total_pool_size), prefetch);
+ mutex_lock(&sheap->chunk_lock);
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* No kernel mapping needed; TZ will own the pages.  Skip zeroing. */
+ attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_ZEROING;
+
+ cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
+ attrs);
+
+ if (!cpu_addr) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ chunk->cpu_addr = cpu_addr;
+ chunk->handle = handle;
+ chunk->chunk_size = len;
+ atomic_set(&chunk->cnt, 0);
+ list_add(&chunk->entry, &sheap->chunks);
+ atomic_add(len, &sheap->total_pool_size);
+ /* clear the bitmap to indicate this region can be allocated from */
+ bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+ len >> PAGE_SHIFT);
+ goto out;
+
+out_free:
+ kfree(chunk);
+out:
+ mutex_unlock(&sheap->chunk_lock);
+
+ trace_ion_secure_cma_add_to_pool_end(len, atomic_read(&sheap->
+ total_pool_size), prefetch);
+
+ return ret;
+}
+
+/* Prefetch worker: grow the pool by the size recorded in last_alloc. */
+static void ion_secure_pool_pages(struct work_struct *work)
+{
+ struct ion_cma_secure_heap *sheap = container_of(work,
+ struct ion_cma_secure_heap, work);
+
+ ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
+}
+
+/*
+ * @s1: start of the first region
+ * @l1: length of the first region
+ * @s2: start of the second region
+ * @l2: length of the second region
+ *
+ * Returns the total number of bytes that intersect.
+ *
+ * s1 is the region we are trying to clear so s2 may be subsumed by s1 but the
+ * maximum size to clear should only ever be l1
+ *
+ */
+static unsigned int intersect(unsigned long s1, unsigned long l1,
+ unsigned long s2, unsigned long l2)
+{
+ unsigned long base1 = s1;
+ unsigned long end1 = s1 + l1;
+ unsigned long base2 = s2;
+ unsigned long end2 = s2 + l2;
+
+ /* Case 0: The regions don't overlap at all */
+ if (!(base1 < end2 && base2 < end1))
+ return 0;
+
+ /* Case 1: region 2 is subsumed by region 1 */
+ if (base1 <= base2 && end2 <= end1)
+ return l2;
+
+ /* case 2: region 1 is subsumed by region 2 */
+ if (base2 <= base1 && end1 <= end2)
+ return l1;
+
+ /* case 3: region1 overlaps region2 on the bottom */
+ if (base2 < end1 && base2 > base1)
+ return end1 - base2;
+
+ /* case 4: region 2 overlaps region1 on the bottom */
+ if (base1 < end2 && base1 > base2)
+ return end2 - base1;
+
+ /* Unreachable if the case analysis above is exhaustive. */
+ pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
+ s1, l1, s2, l2);
+ WARN_ON(1);
+ /* return max intersection value, so that it will fail later */
+ return (unsigned int)(~0);
+}
+
+/*
+ * Asynchronously grow the pool by (unsigned long)data bytes (0 means the
+ * heap's default prefetch size), clamped to remaining heap capacity.
+ * Returns 0, or -EINVAL if heap is not a secure-DMA heap.
+ */
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+ unsigned long len = (unsigned long)data;
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+ unsigned long diff;
+
+ if ((int)heap->type != ION_HEAP_TYPE_SECURE_DMA)
+ return -EINVAL;
+
+ if (len == 0)
+ len = sheap->default_prefetch_size;
+
+ /*
+ * Only prefetch as much space as there is left in the pool so
+ * check against the current free size of the heap.
+ * This is slightly racy if someone else is allocating at the same
+ * time. CMA has a restricted size for the heap so worst case
+ * the prefetch doesn't work because the allocation fails.
+ */
+ diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
+
+ if (len > diff)
+ len = diff;
+
+ sheap->last_alloc = len;
+ trace_ion_prefetching(sheap->last_alloc);
+ schedule_work(&sheap->work);
+
+ return 0;
+}
+
+/*
+ * Diagnostic dump when the per-chunk overlap accounting does not add up
+ * to the requested length: print the offending request and every pooled
+ * chunk, then WARN.
+ */
+static void bad_math_dump(unsigned long len, int total_overlap,
+ struct ion_cma_secure_heap *sheap,
+ bool alloc, dma_addr_t paddr)
+{
+ struct list_head *entry;
+
+ pr_err("Bad math! expected total was %lx actual was %x\n",
+ len, total_overlap);
+ pr_err("attempted %s address was %pa len %lx\n",
+ alloc ? "allocation" : "free", &paddr, len);
+ pr_err("chunks:\n");
+ list_for_each(entry, &sheap->chunks) {
+ struct ion_cma_alloc_chunk *chunk =
+ container_of(entry,
+ struct ion_cma_alloc_chunk, entry);
+ pr_info("--- pa %pa len %lx\n",
+ &chunk->handle, chunk->chunk_size);
+ }
+ WARN(1, "mismatch in the sizes of secure cma chunks\n");
+}
+
+/*
+ * Carve len bytes out of the pooled chunks: find a free run in the heap
+ * bitmap, mark it used, and charge each underlying chunk's refcount by
+ * the number of bytes the run overlaps it.
+ *
+ * NOTE(review): on the bad-math failure path the bits set above are NOT
+ * cleared, so that pool space is leaked -- flag for follow-up.
+ */
+static int ion_secure_cma_alloc_from_pool(
+ struct ion_cma_secure_heap *sheap,
+ dma_addr_t *phys,
+ unsigned long len)
+{
+ dma_addr_t paddr;
+ unsigned long page_no;
+ int ret = 0;
+ int total_overlap = 0;
+ struct list_head *entry;
+
+ mutex_lock(&sheap->chunk_lock);
+
+ page_no = bitmap_find_next_zero_area(sheap->bitmap,
+ sheap->npages, 0,
+ len >> PAGE_SHIFT, 0);
+ if (page_no >= sheap->npages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
+ paddr = sheap->base + (page_no << PAGE_SHIFT);
+
+ list_for_each(entry, &sheap->chunks) {
+ struct ion_cma_alloc_chunk *chunk = container_of(entry,
+ struct ion_cma_alloc_chunk, entry);
+ int overlap = intersect(chunk->handle,
+ chunk->chunk_size, paddr, len);
+
+ atomic_add(overlap, &chunk->cnt);
+ total_overlap += overlap;
+ }
+
+ /* Sanity check: the chunks must fully cover the carved-out run. */
+ if (total_overlap != len) {
+ bad_math_dump(len, total_overlap, sheap, 1, paddr);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *phys = paddr;
+out:
+ mutex_unlock(&sheap->chunk_lock);
+ return ret;
+}
+
+/*
+ * Return one idle chunk to DMA: mark its pages busy in the bitmap (no
+ * longer allocatable), free the DMA memory and drop the bookkeeping.
+ * Caller holds chunk_lock and has verified chunk->cnt == 0.
+ */
+static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
+ struct ion_cma_alloc_chunk *chunk)
+{
+ unsigned long attrs = 0;
+
+ attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ /* This region is 'allocated' and not available to allocate from */
+ bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
+ chunk->chunk_size >> PAGE_SHIFT);
+ dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
+ chunk->handle, attrs);
+ atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
+ list_del(&chunk->entry);
+ kfree(chunk);
+}
+
+static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
+ int max_nr)
+{
+ struct list_head *entry, *_n;
+ unsigned long drained_size = 0, skipped_size = 0;
+
+ trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
+
+ list_for_each_safe(entry, _n, &sheap->chunks) {
+ struct ion_cma_alloc_chunk *chunk = container_of(entry,
+ struct ion_cma_alloc_chunk, entry);
+
+ if (max_nr < 0)
+ break;
+
+ if (atomic_read(&chunk->cnt) == 0) {
+ max_nr -= chunk->chunk_size;
+ drained_size += chunk->chunk_size;
+ ion_secure_cma_free_chunk(sheap, chunk);
+ } else {
+ skipped_size += chunk->chunk_size;
+ }
+ }
+
+ trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+}
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+
+ mutex_lock(&sheap->chunk_lock);
+ __ion_secure_cma_shrink_pool(sheap, INT_MAX);
+ mutex_unlock(&sheap->chunk_lock);
+
+ return 0;
+}
+
+static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_cma_secure_heap *sheap = container_of(shrinker,
+ struct ion_cma_secure_heap, shrinker);
+ int nr_to_scan = sc->nr_to_scan;
+
+ /*
+ * Allocation path may invoke the shrinker. Proceeding any further
+ * would cause a deadlock in several places so don't shrink if that
+ * happens.
+ */
+ if (!mutex_trylock(&sheap->chunk_lock))
+ return -EAGAIN;
+
+ __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+
+ mutex_unlock(&sheap->chunk_lock);
+
+ return atomic_read(&sheap->total_pool_size);
+}
+
+static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_cma_secure_heap *sheap = container_of(shrinker,
+ struct ion_cma_secure_heap, shrinker);
+ return atomic_read(&sheap->total_pool_size);
+}
+
+static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
+ dma_addr_t handle,
+ unsigned long len)
+{
+ struct list_head *entry, *_n;
+ int total_overlap = 0;
+
+ mutex_lock(&sheap->chunk_lock);
+ bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+ len >> PAGE_SHIFT);
+
+ list_for_each_safe(entry, _n, &sheap->chunks) {
+ struct ion_cma_alloc_chunk *chunk = container_of(entry,
+ struct ion_cma_alloc_chunk, entry);
+ int overlap = intersect(chunk->handle,
+ chunk->chunk_size, handle, len);
+
+ /*
+ * Don't actually free this from the pool list yet, let either
+ * an explicit drain call or the shrinkers take care of the
+ * pool.
+ */
+ atomic_sub_return(overlap, &chunk->cnt);
+ if (atomic_read(&chunk->cnt) < 0) {
+ WARN(1, "Invalid chunk size of %d\n",
+ atomic_read(&chunk->cnt));
+ goto out;
+ }
+
+ total_overlap += overlap;
+ }
+
+ if (atomic_read(&sheap->total_pool_size) < 0) {
+ WARN(1, "total pool size of %d is unexpected\n",
+ atomic_read(&sheap->total_pool_size));
+ goto out;
+ }
+
+ if (total_overlap != len)
+ bad_math_dump(len, total_overlap, sheap, 0, handle);
+out:
+ mutex_unlock(&sheap->chunk_lock);
+}
+
+/* ION CMA heap operations functions */
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
+ struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+ struct ion_secure_cma_buffer_info *info;
+ int ret;
+
+ dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ION_CMA_ALLOCATE_FAILED;
+
+ mutex_lock(&sheap->alloc_lock);
+ ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+
+ if (ret) {
+retry:
+ ret = ion_secure_cma_add_to_pool(sheap, len, false);
+ if (ret) {
+ mutex_unlock(&sheap->alloc_lock);
+ dev_err(sheap->dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+ ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+ if (ret) {
+ /*
+ * Lost the race with the shrinker, try again
+ */
+ goto retry;
+ }
+ }
+ mutex_unlock(&sheap->alloc_lock);
+
+ atomic_add(len, &sheap->total_allocated);
+ info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(sheap->dev, "Fail to allocate sg table\n");
+ goto err;
+ }
+
+ info->len = len;
+ ion_secure_cma_get_sgtable(sheap->dev,
+ info->table, info->phys, len);
+
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+ return info;
+
+err:
+ kfree(info);
+ return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap,
+ struct ion_secure_cma_buffer_info
+ *info)
+{
+ struct ion_secure_cma_non_contig_info *nc_info, *temp;
+
+ list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) {
+ ion_secure_cma_free_from_pool(sheap, nc_info->phys,
+ nc_info->len);
+ list_del(&nc_info->entry);
+ kfree(nc_info);
+ }
+}
+
+static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap,
+ struct ion_secure_cma_buffer_info *info,
+ bool release_memory)
+{
+ if (release_memory) {
+ if (info->ncelems)
+ __ion_secure_cma_free_non_contig(sheap, info);
+ else
+ ion_secure_cma_free_from_pool(sheap, info->phys,
+ info->len);
+ }
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info);
+}
+
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate_non_contig(
+ struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+ struct ion_secure_cma_buffer_info *info;
+ int ret;
+ unsigned long alloc_size = len;
+ struct ion_secure_cma_non_contig_info *nc_info, *temp;
+ unsigned long ncelems = 0;
+ struct scatterlist *sg;
+ unsigned long total_allocated = 0;
+
+ dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ION_CMA_ALLOCATE_FAILED;
+
+ INIT_LIST_HEAD(&info->non_contig_list);
+ info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(sheap->dev, "Fail to allocate sg table\n");
+ goto err;
+ }
+ mutex_lock(&sheap->alloc_lock);
+ while (total_allocated < len) {
+ if (alloc_size < SZ_1M) {
+ pr_err("Cannot allocate less than 1MB\n");
+ goto err2;
+ }
+ nc_info = kzalloc(sizeof(*nc_info), GFP_KERNEL);
+ if (!nc_info)
+ goto err2;
+
+ ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys,
+ alloc_size);
+ if (ret) {
+retry:
+ ret = ion_secure_cma_add_to_pool(sheap, alloc_size,
+ false);
+ if (ret) {
+ alloc_size = alloc_size / 2;
+ if (!IS_ALIGNED(alloc_size, SZ_1M))
+ alloc_size = round_down(alloc_size,
+ SZ_1M);
+ kfree(nc_info);
+ continue;
+ }
+ ret = ion_secure_cma_alloc_from_pool(sheap,
+ &nc_info->phys,
+ alloc_size);
+ if (ret) {
+ /*
+ * Lost the race with the shrinker, try again
+ */
+ goto retry;
+ }
+ }
+ nc_info->len = alloc_size;
+ list_add_tail(&nc_info->entry, &info->non_contig_list);
+ ncelems++;
+ total_allocated += alloc_size;
+ alloc_size = min(alloc_size, len - total_allocated);
+ }
+ mutex_unlock(&sheap->alloc_lock);
+ atomic_add(total_allocated, &sheap->total_allocated);
+
+ nc_info = list_first_entry_or_null(&info->non_contig_list,
+ struct
+ ion_secure_cma_non_contig_info,
+ entry);
+ if (!nc_info) {
+ pr_err("%s: Unable to find first entry of non contig list\n",
+ __func__);
+ goto err1;
+ }
+ info->phys = nc_info->phys;
+ info->len = total_allocated;
+ info->ncelems = ncelems;
+
+ ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL);
+ if (unlikely(ret))
+ goto err1;
+
+ sg = info->table->sgl;
+ list_for_each_entry(nc_info, &info->non_contig_list, entry) {
+ sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0);
+ sg_dma_address(sg) = nc_info->phys;
+ sg = sg_next(sg);
+ }
+ buffer->priv_virt = info;
+ dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+ return info;
+
+err2:
+ mutex_unlock(&sheap->alloc_lock);
+err1:
+ list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
+ entry) {
+ list_del(&nc_info->entry);
+ kfree(nc_info);
+ }
+ kfree(info->table);
+err:
+ kfree(info);
+ return ION_CMA_ALLOCATE_FAILED;
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+ struct ion_secure_cma_buffer_info *buf = NULL;
+ unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
+
+ if (!secure_allocation &&
+ !ion_heap_allow_secure_allocation(heap->type)) {
+ pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
+ __func__, heap->name, flags);
+ return -ENOMEM;
+ }
+
+ if (ION_IS_CACHED(flags)) {
+ pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+ __func__, heap->name);
+ return -ENOMEM;
+ }
+
+ if (!IS_ALIGNED(len, SZ_1M)) {
+ pr_err("%s: length of allocation from %s must be a multiple of 1MB\n",
+ __func__, heap->name);
+ return -ENOMEM;
+ }
+ trace_ion_secure_cma_allocate_start(heap->name, len, align, flags);
+ if (!allow_non_contig)
+ buf = __ion_secure_cma_allocate(heap, buffer, len, align,
+ flags);
+ else
+ buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
+ align, flags);
+ trace_ion_secure_cma_allocate_end(heap->name, len, align, flags);
+ if (buf) {
+ int ret;
+
+ if (!msm_secure_v2_is_supported()) {
+ pr_err("%s: securing buffers from clients is not supported on this platform\n",
+ __func__);
+ ret = 1;
+ } else {
+ trace_ion_cp_secure_buffer_start(heap->name, len, align,
+ flags);
+ ret = msm_secure_table(buf->table);
+ trace_ion_cp_secure_buffer_end(heap->name, len, align,
+ flags);
+ }
+ if (ret) {
+ struct ion_cma_secure_heap *sheap =
+ container_of(buffer->heap,
+ struct ion_cma_secure_heap, heap);
+
+ pr_err("%s: failed to secure buffer\n", __func__);
+ __ion_secure_cma_free(sheap, buf, true);
+ }
+ return ret;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(buffer->heap, struct ion_cma_secure_heap, heap);
+ struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+ int ret = 0;
+
+ dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
+ if (msm_secure_v2_is_supported())
+ ret = msm_unsecure_table(info->table);
+ atomic_sub(buffer->size, &sheap->total_allocated);
+ if (atomic_read(&sheap->total_allocated) < 0) {
+ WARN(1, "no memory is allocated from this pool\n");
+ return;
+ }
+
+ /* release memory */
+ if (ret) {
+ WARN(1, "Unsecure failed, can't free the memory. Leaking it!");
+ atomic_add(buffer->size, &sheap->total_leaked);
+ }
+
+ __ion_secure_cma_free(sheap, info, ret ? false : true);
+}
+
+static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+ struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n",
+ buffer, &info->phys);
+
+ *addr = info->phys;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_mmap(struct ion_heap *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ pr_info("%s: mmaping from secure heap %s disallowed\n",
+ __func__, mapper->name);
+ return -EINVAL;
+}
+
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+ __func__, heap->name);
+ return ERR_PTR(-EINVAL);
+}
+
+static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct list_head *mem_map)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+
+ if (mem_map) {
+ struct mem_map_data *data;
+
+ seq_puts(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size");
+
+ list_for_each_entry(data, mem_map, node) {
+ const char *client_name = "(null)";
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+ client_name, &data->addr,
+ &data->addr_end,
+ data->size, data->size);
+ }
+ }
+ seq_printf(s, "Total allocated: 0x%x\n",
+ atomic_read(&sheap->total_allocated));
+ seq_printf(s, "Total pool size: 0x%x\n",
+ atomic_read(&sheap->total_pool_size));
+ seq_printf(s, "Total memory leaked due to unlock failures: 0x%x\n",
+ atomic_read(&sheap->total_leaked));
+
+ return 0;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+ .allocate = ion_secure_cma_allocate,
+ .free = ion_secure_cma_free,
+ .map_dma = ion_secure_cma_heap_map_dma,
+ .unmap_dma = ion_secure_cma_heap_unmap_dma,
+ .phys = ion_secure_cma_phys,
+ .map_user = ion_secure_cma_mmap,
+ .map_kernel = ion_secure_cma_map_kernel,
+ .unmap_kernel = ion_secure_cma_unmap_kernel,
+ .print_debug = ion_secure_cma_print_debug,
+};
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_cma_secure_heap *sheap;
+ int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
+
+ sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
+ if (!sheap)
+ return ERR_PTR(-ENOMEM);
+
+ sheap->dev = data->priv;
+ mutex_init(&sheap->chunk_lock);
+ mutex_init(&sheap->alloc_lock);
+ sheap->heap.ops = &ion_secure_cma_ops;
+ sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA;
+ sheap->npages = data->size >> PAGE_SHIFT;
+ sheap->base = data->base;
+ sheap->heap_size = data->size;
+ sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
+ INIT_LIST_HEAD(&sheap->chunks);
+ INIT_WORK(&sheap->work, ion_secure_pool_pages);
+ sheap->shrinker.seeks = DEFAULT_SEEKS;
+ sheap->shrinker.batch = 0;
+ sheap->shrinker.scan_objects = ion_secure_cma_shrinker;
+ sheap->shrinker.count_objects = ion_secure_cma_shrinker_count;
+ sheap->default_prefetch_size = sheap->heap_size;
+ register_shrinker(&sheap->shrinker);
+
+ if (!sheap->bitmap) {
+ kfree(sheap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (data->extra_data) {
+ struct ion_cma_pdata *extra = data->extra_data;
+
+ sheap->default_prefetch_size = extra->default_prefetch_size;
+ }
+
+ /*
+ * Initially mark the entire range as unavailable (all bits set); the
+ * allocator treats zero bits as free, and bits are cleared as backing
+ * CMA chunks are added to / released back into the pool.
+ */
+ bitmap_fill(sheap->bitmap, sheap->npages);
+
+ return &sheap->heap;
+}
+
+void ion_secure_cma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_cma_secure_heap *sheap =
+ container_of(heap, struct ion_cma_secure_heap, heap);
+
+ kfree(sheap);
+}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index bb119cc..775c666 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,6 +26,7 @@
#include <linux/rbtree.h>
#include <linux/seq_file.h>
+#include "msm_ion_priv.h"
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 9d53391..3771726 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -404,6 +404,28 @@
ret = -ENOMEM;
break;
}
+ case ION_HEAP_TYPE_SECURE_DMA:
+ {
+ unsigned int val;
+ struct ion_cma_pdata *extra = NULL;
+
+ ret = of_property_read_u32(node,
+ "qcom,default-prefetch-size", &val);
+ if (!ret) {
+ heap->extra_data = kzalloc(sizeof(*extra),
+ GFP_KERNEL);
+
+ if (!heap->extra_data) {
+ ret = -ENOMEM;
+ } else {
+ extra = heap->extra_data;
+ extra->default_prefetch_size = val;
+ }
+ } else {
+ ret = 0;
+ }
+ break;
+ }
default:
heap->extra_data = 0;
break;
@@ -423,6 +445,7 @@
MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
MAKE_HEAP_TYPE_MAPPING(CHUNK),
MAKE_HEAP_TYPE_MAPPING(DMA),
+ MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
};
@@ -609,6 +632,16 @@
return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
}
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
int ion_heap_allow_heap_secure(enum ion_heap_type type)
{
return false;
@@ -796,6 +829,13 @@
int ret;
ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+ ION_HEAP_TYPE_SECURE_DMA,
+ (void *)data.prefetch_data.len,
+ ion_secure_cma_prefetch);
+ if (ret)
+ return ret;
+
+ ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
ION_HEAP_TYPE_SYSTEM_SECURE,
(void *)&data.prefetch_data,
ion_system_secure_heap_prefetch);
@@ -806,6 +846,13 @@
case ION_IOC_DRAIN:
{
int ret;
+ ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+ ION_HEAP_TYPE_SECURE_DMA,
+ (void *)data.prefetch_data.len,
+ ion_secure_cma_drain_pool);
+
+ if (ret)
+ return ret;
ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
ION_HEAP_TYPE_SYSTEM_SECURE,
@@ -959,6 +1006,11 @@
struct ion_heap *heap = NULL;
switch ((int)heap_data->type) {
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_SECURE_DMA:
+ heap = ion_secure_cma_heap_create(heap_data);
+ break;
+#endif
case ION_HEAP_TYPE_SYSTEM_SECURE:
heap = ion_system_secure_heap_create(heap_data);
break;
@@ -988,6 +1040,11 @@
return;
switch ((int)heap->type) {
+#ifdef CONFIG_CMA
+ case ION_HEAP_TYPE_SECURE_DMA:
+ ion_secure_cma_heap_destroy(heap);
+ break;
+#endif
case ION_HEAP_TYPE_SYSTEM_SECURE:
ion_system_secure_heap_destroy(heap);
break;
diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h
new file mode 100644
index 0000000..bbf2e8b
--- /dev/null
+++ b/drivers/staging/android/ion/msm_ion_priv.h
@@ -0,0 +1,112 @@
+/*
+ * drivers/staging/android/ion/msm_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap);
+void ion_iommu_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap);
+void ion_cp_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
+void ion_system_secure_heap_destroy(struct ion_heap *heap);
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
+void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *heap);
+void ion_secure_cma_heap_destroy(struct ion_heap *heap);
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
+
+#else
+static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+ return -ENODEV;
+}
+
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *pheap);
+void ion_removed_heap_destroy(struct ion_heap *heap);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+int get_secure_vmid(unsigned long flags);
+
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base: The starting address used for the sg dma address
+ * @chunk_size: The size of each entry in the sg table
+ * @total_size: The total size of the sg table (i.e. the sum of the
+ * entries). This will be rounded up to the nearest
+ * multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+ size_t chunk_size,
+ size_t total_size);
+
+void show_ion_usage(struct ion_device *dev);
+#endif /* _MSM_ION_PRIV_H */
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 4f9dd73..d510fda 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -92,6 +92,11 @@
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
+/**
+ * Flag to allow non-contiguous allocation of memory from secure
+ * heap
+ */
+#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(28)
/**
* Flag to use when allocating to indicate that a heap is secure.
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 598a67d..ceacf3d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,3 +10,5 @@
libcomposite-y += composite.o functions.o configfs.o u_f.o
obj-$(CONFIG_USB_GADGET) += udc/ function/ legacy/
+
+obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
new file mode 100644
index 0000000..78b7d3a
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -0,0 +1,556 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "ci13xxx_udc.c"
+
+#define MSM_USB_BASE (udc->regs)
+
+#define CI13XXX_MSM_MAX_LOG2_ITC 7
+
+struct ci13xxx_udc_context {
+ int irq;
+ void __iomem *regs;
+ int wake_gpio;
+ int wake_irq;
+ bool wake_irq_state;
+ struct pinctrl *ci13xxx_pinctrl;
+ struct timer_list irq_enable_timer;
+ bool irq_disabled;
+};
+
+static struct ci13xxx_udc_context _udc_ctxt;
+#define IRQ_ENABLE_DELAY (jiffies + msecs_to_jiffies(1000))
+
+static irqreturn_t msm_udc_irq(int irq, void *data)
+{
+ return udc_irq();
+}
+
+static void ci13xxx_msm_suspend(void)
+{
+ struct device *dev = _udc->gadget.dev.parent;
+
+ dev_dbg(dev, "ci13xxx_msm_suspend\n");
+
+ if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
+ enable_irq_wake(_udc_ctxt.wake_irq);
+ enable_irq(_udc_ctxt.wake_irq);
+ _udc_ctxt.wake_irq_state = true;
+ }
+}
+
+static void ci13xxx_msm_resume(void)
+{
+ struct device *dev = _udc->gadget.dev.parent;
+
+ dev_dbg(dev, "ci13xxx_msm_resume\n");
+
+ if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
+ disable_irq_wake(_udc_ctxt.wake_irq);
+ disable_irq_nosync(_udc_ctxt.wake_irq);
+ _udc_ctxt.wake_irq_state = false;
+ }
+}
+
+static void ci13xxx_msm_disconnect(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+
+ if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
+ u32 temp;
+
+ usb_phy_io_write(phy,
+ ULPI_MISC_A_VBUSVLDEXT |
+ ULPI_MISC_A_VBUSVLDEXTSEL,
+ ULPI_CLR(ULPI_MISC_A));
+
+ /* Notify LINK of VBUS LOW */
+ temp = readl_relaxed(USB_USBCMD);
+ temp &= ~USBCMD_SESS_VLD_CTRL;
+ writel_relaxed(temp, USB_USBCMD);
+
+ /*
+ * Add memory barrier as it is must to complete
+ * above USB PHY and Link register writes before
+ * moving ahead with USB peripheral mode enumeration,
+ * otherwise USB peripheral mode may not work.
+ */
+ mb();
+ }
+}
+
+/* Link power management (LPM) reduces power consumption through
+ * brief hardware suspend/resume cycles.
+ */
+static void ci13xxx_msm_set_l1(struct ci13xxx *udc)
+{
+ int temp;
+ struct device *dev = udc->gadget.dev.parent;
+
+ dev_dbg(dev, "Enable link power management\n");
+
+ /* Enable remote wakeup and L1 for IN EPs */
+ writel_relaxed(0xffff0000, USB_L1_EP_CTRL);
+
+ temp = readl_relaxed(USB_L1_CONFIG);
+ temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP |
+ L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM |
+ L1_CONFIG_PLL;
+ writel_relaxed(temp, USB_L1_CONFIG);
+}
+
+static void ci13xxx_msm_connect(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+
+ if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
+ int temp;
+
+ usb_phy_io_write(phy,
+ ULPI_MISC_A_VBUSVLDEXT |
+ ULPI_MISC_A_VBUSVLDEXTSEL,
+ ULPI_SET(ULPI_MISC_A));
+
+ temp = readl_relaxed(USB_GENCONFIG_2);
+ temp |= GENCONFIG_2_SESS_VLD_CTRL_EN;
+ writel_relaxed(temp, USB_GENCONFIG_2);
+
+ temp = readl_relaxed(USB_USBCMD);
+ temp |= USBCMD_SESS_VLD_CTRL;
+ writel_relaxed(temp, USB_USBCMD);
+
+ /*
+ * Add memory barrier as it is must to complete
+ * above USB PHY and Link register writes before
+ * moving ahead with USB peripheral mode enumeration,
+ * otherwise USB peripheral mode may not work.
+ */
+ mb();
+ }
+}
+
+static void ci13xxx_msm_reset(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct usb_phy *phy = udc->transceiver;
+ struct device *dev = udc->gadget.dev.parent;
+ int temp;
+
+ writel_relaxed(0, USB_AHBBURST);
+ writel_relaxed(0x08, USB_AHBMODE);
+
+ /* workaround for rx buffer collision issue */
+ temp = readl_relaxed(USB_GENCONFIG);
+ temp &= ~GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE;
+ temp &= ~GENCONFIG_ULPI_SERIAL_EN;
+ writel_relaxed(temp, USB_GENCONFIG);
+
+ if (udc->gadget.l1_supported)
+ ci13xxx_msm_set_l1(udc);
+
+ if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
+ int temp;
+
+ dev_dbg(dev, "using secondary hsphy\n");
+ temp = readl_relaxed(USB_PHY_CTRL2);
+ temp |= (1<<16);
+ writel_relaxed(temp, USB_PHY_CTRL2);
+
+ /*
+ * Add memory barrier to make sure above LINK writes are
+ * complete before moving ahead with USB peripheral mode
+ * enumeration.
+ */
+ mb();
+ }
+}
+
+static void ci13xxx_msm_mark_err_event(void)
+{
+ struct ci13xxx *udc = _udc;
+ struct msm_otg *otg;
+
+ if (udc == NULL)
+ return;
+
+ if (udc->transceiver == NULL)
+ return;
+
+ otg = container_of(udc->transceiver, struct msm_otg, phy);
+
+ /* This will trigger hardware reset before next connection */
+ otg->err_event_seen = true;
+}
+
+static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned int event)
+{
+ struct device *dev = udc->gadget.dev.parent;
+
+ switch (event) {
+ case CI13XXX_CONTROLLER_RESET_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+ ci13xxx_msm_reset();
+ break;
+ case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
+ ci13xxx_msm_disconnect();
+ ci13xxx_msm_resume();
+ break;
+ case CI13XXX_CONTROLLER_CONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n");
+ ci13xxx_msm_connect();
+ break;
+ case CI13XXX_CONTROLLER_SUSPEND_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
+ ci13xxx_msm_suspend();
+ break;
+ case CI13XXX_CONTROLLER_RESUME_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+ case CI13XXX_CONTROLLER_ERROR_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_ERROR_EVENT received\n");
+ ci13xxx_msm_mark_err_event();
+ break;
+ case CI13XXX_CONTROLLER_UDC_STARTED_EVENT:
+ dev_info(dev,
+ "CI13XXX_CONTROLLER_UDC_STARTED_EVENT received\n");
+ break;
+ default:
+ dev_dbg(dev, "unknown ci13xxx_udc event\n");
+ break;
+ }
+}
+
+static bool ci13xxx_msm_in_lpm(struct ci13xxx *udc)
+{
+ struct msm_otg *otg;
+
+ if (udc == NULL)
+ return false;
+
+ if (udc->transceiver == NULL)
+ return false;
+
+ otg = container_of(udc->transceiver, struct msm_otg, phy);
+
+ return (atomic_read(&otg->in_lpm) != 0);
+}
+
+
+static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
+{
+ struct ci13xxx *udc = _udc;
+
+ if (udc->transceiver && udc->vbus_active && udc->suspended)
+ usb_phy_set_suspend(udc->transceiver, 0);
+ else if (!udc->suspended)
+ ci13xxx_msm_resume();
+
+ return IRQ_HANDLED;
+}
+
+static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
+ .name = "ci13xxx_msm",
+ .flags = CI13XXX_REGS_SHARED |
+ CI13XXX_REQUIRE_TRANSCEIVER |
+ CI13XXX_PULLUP_ON_VBUS |
+ CI13XXX_ZERO_ITC |
+ CI13XXX_DISABLE_STREAMING,
+ .nz_itc = 0,
+ .notify_event = ci13xxx_msm_notify_event,
+ .in_lpm = ci13xxx_msm_in_lpm,
+};
+
+/*
+ * ci13xxx_msm_install_wake_gpio: set up the USB_RESUME wake GPIO.
+ *
+ * Moves the (optional) pinctrl to the "ci13xxx_active" state, requests
+ * the GPIO described by @res, maps it to an IRQ and registers
+ * ci13xxx_msm_resume_irq() on the rising edge.  The IRQ is left
+ * disabled; callers enable it when entering low-power mode.
+ *
+ * Returns 0 on success or a negative errno; on failure the GPIO and
+ * pinctrl state are rolled back and _udc_ctxt.wake_gpio is cleared.
+ */
+static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
+ struct resource *res)
+{
+ int wake_irq;
+ int ret;
+ struct pinctrl_state *set_state;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
+
+ _udc_ctxt.wake_gpio = res->start;
+ if (_udc_ctxt.ci13xxx_pinctrl) {
+ set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
+ "ci13xxx_active");
+ if (IS_ERR(set_state)) {
+ pr_err("cannot get ci13xxx pinctrl active state\n");
+ return PTR_ERR(set_state);
+ }
+ pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl, set_state);
+ }
+ /* NOTE(review): gpio_request() return value is not checked here;
+ * a failed request would make the later gpio_to_irq() unreliable.
+ */
+ gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
+ gpio_direction_input(_udc_ctxt.wake_gpio);
+ wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio);
+ if (wake_irq < 0) {
+ dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
+ _udc_ctxt.wake_gpio, wake_irq);
+ ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
+ goto gpio_free;
+ }
+ /* keep the wake IRQ off until low-power entry needs it */
+ disable_irq(wake_irq);
+ _udc_ctxt.wake_irq = wake_irq;
+
+ return 0;
+
+gpio_free:
+ gpio_free(_udc_ctxt.wake_gpio);
+ if (_udc_ctxt.ci13xxx_pinctrl) {
+ set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
+ "ci13xxx_sleep");
+ if (IS_ERR(set_state))
+ pr_err("cannot get ci13xxx pinctrl sleep state\n");
+ else
+ pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
+ set_state);
+ }
+ _udc_ctxt.wake_gpio = 0;
+ return ret;
+}
+
+/*
+ * ci13xxx_msm_uninstall_wake_gpio: undo ci13xxx_msm_install_wake_gpio().
+ *
+ * Frees the wake GPIO (if one was installed) and moves the optional
+ * pinctrl back to the "ci13xxx_sleep" state.  Safe to call when no
+ * wake GPIO was ever installed (wake_gpio == 0 is the "unset" marker).
+ */
+static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
+{
+ struct pinctrl_state *set_state;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
+
+ if (_udc_ctxt.wake_gpio) {
+ gpio_free(_udc_ctxt.wake_gpio);
+ if (_udc_ctxt.ci13xxx_pinctrl) {
+ set_state =
+ pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
+ "ci13xxx_sleep");
+ if (IS_ERR(set_state))
+ pr_err("cannot get ci13xxx pinctrl sleep state\n");
+ else
+ pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
+ set_state);
+ }
+ _udc_ctxt.wake_gpio = 0;
+ }
+}
+
+static void enable_usb_irq_timer_func(unsigned long data);
+/*
+ * ci13xxx_msm_probe: platform probe for the MSM ChipIdea UDC glue.
+ *
+ * Order of operations:
+ *   1. fold platform data (ITC value, AHB2AHB bypass, streaming) into
+ *      the shared ci13xxx_msm_udc_driver descriptor;
+ *   2. ioremap the controller register bank and run the core udc_probe();
+ *   3. fetch the controller IRQ and the optional USB_RESUME wake GPIO
+ *      (plus pinctrl), then request the shared controller IRQ;
+ *   4. arm runtime PM (no callbacks; device starts active).
+ *
+ * Returns 0 on success or a negative errno, unwinding in reverse order.
+ */
+static int ci13xxx_msm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+ struct ci13xxx_platform_data *pdata = pdev->dev.platform_data;
+ bool is_l1_supported = false;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
+
+ if (pdata) {
+ /* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */
+ if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC ||
+ pdata->log2_itc <= 0)
+ ci13xxx_msm_udc_driver.nz_itc = 0;
+ else
+ ci13xxx_msm_udc_driver.nz_itc =
+ 1 << (pdata->log2_itc-1);
+
+ is_l1_supported = pdata->l1_supported;
+ /* Set ahb2ahb bypass flag if it is requested. */
+ if (pdata->enable_ahb2ahb_bypass)
+ ci13xxx_msm_udc_driver.flags |=
+ CI13XXX_ENABLE_AHB2AHB_BYPASS;
+
+ /* Clear disable streaming flag if is requested. */
+ if (pdata->enable_streaming)
+ ci13xxx_msm_udc_driver.flags &=
+ ~CI13XXX_DISABLE_STREAMING;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get platform resource mem\n");
+ return -ENXIO;
+ }
+
+ _udc_ctxt.regs = ioremap(res->start, resource_size(res));
+ if (!_udc_ctxt.regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "udc_probe failed\n");
+ goto iounmap;
+ }
+
+ _udc->gadget.l1_supported = is_l1_supported;
+
+ _udc_ctxt.irq = platform_get_irq(pdev, 0);
+ if (_udc_ctxt.irq < 0) {
+ dev_err(&pdev->dev, "IRQ not found\n");
+ ret = -ENXIO;
+ goto udc_remove;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
+ /* Get pinctrl if target uses pinctrl */
+ _udc_ctxt.ci13xxx_pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(_udc_ctxt.ci13xxx_pinctrl)) {
+ if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
+ dev_err(&pdev->dev, "Error encountered while getting pinctrl");
+ ret = PTR_ERR(_udc_ctxt.ci13xxx_pinctrl);
+ goto udc_remove;
+ }
+ dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
+ _udc_ctxt.ci13xxx_pinctrl = NULL;
+ }
+ /* wake GPIO is optional: only install it when the resource exists */
+ if (res) {
+ ret = ci13xxx_msm_install_wake_gpio(pdev, res);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio irq install failed\n");
+ goto udc_remove;
+ }
+ }
+
+ ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
+ pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto gpio_uninstall;
+ }
+
+ setup_timer(&_udc_ctxt.irq_enable_timer, enable_usb_irq_timer_func,
+ (unsigned long)NULL);
+
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+gpio_uninstall:
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
+udc_remove:
+ udc_remove();
+iounmap:
+ iounmap(_udc_ctxt.regs);
+
+ return ret;
+}
+
+/*
+ * ci13xxx_msm_remove: tear down everything probe set up, in reverse
+ * order (runtime PM, controller IRQ, wake GPIO, UDC core, iomap).
+ */
+int ci13xxx_msm_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ free_irq(_udc_ctxt.irq, pdev);
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
+ udc_remove();
+ iounmap(_udc_ctxt.regs);
+ return 0;
+}
+
+/*
+ * ci13xxx_msm_shutdown: drop the D+ pull-up on system shutdown so the
+ * host sees a clean disconnect.
+ */
+void ci13xxx_msm_shutdown(struct platform_device *pdev)
+{
+ ci13xxx_pullup(&_udc->gadget, 0);
+}
+
+/*
+ * msm_hw_soft_reset: external hook that soft-resets the device
+ * controller via the core's hw_device_reset() helper.
+ */
+void msm_hw_soft_reset(void)
+{
+ struct ci13xxx *udc = _udc;
+
+ hw_device_reset(udc);
+}
+
+/*
+ * msm_hw_bam_disable: set or clear the BAM-disable bit in GENCONFIG.
+ * @bam_disable: true to disable the BAM pipe, false to re-enable it.
+ *
+ * NOTE(review): @udc appears to be consumed by the USB_GENCONFIG
+ * register macro (defined elsewhere) — confirm, otherwise it is unused.
+ */
+void msm_hw_bam_disable(bool bam_disable)
+{
+ u32 val;
+ struct ci13xxx *udc = _udc;
+
+ if (bam_disable)
+ val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE;
+ else
+ val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE;
+
+ writel_relaxed(val, USB_GENCONFIG);
+}
+
+/*
+ * msm_usb_irq_disable: gate the controller interrupt on/off.
+ * @disable: true to mask the IRQ, false to unmask it.
+ *
+ * When disabling, a safety timer is (re)armed so the IRQ is re-enabled
+ * from enable_usb_irq_timer_func() even if nobody calls back with
+ * disable == false.  State is tracked in _udc_ctxt.irq_disabled under
+ * the UDC spinlock, and repeated disables just re-arm the timer.
+ *
+ * NOTE(review): mod_timer() takes an absolute jiffies expiry; confirm
+ * IRQ_ENABLE_DELAY is defined relative to the current jiffies value.
+ */
+void msm_usb_irq_disable(bool disable)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(udc->lock, flags);
+
+ if (_udc_ctxt.irq_disabled == disable) {
+ pr_debug("Interrupt state already disable = %d\n", disable);
+ if (disable)
+ mod_timer(&_udc_ctxt.irq_enable_timer,
+ IRQ_ENABLE_DELAY);
+ spin_unlock_irqrestore(udc->lock, flags);
+ return;
+ }
+
+ if (disable) {
+ disable_irq_nosync(_udc_ctxt.irq);
+ /* start timer here */
+ pr_debug("%s: Disabling interrupts\n", __func__);
+ mod_timer(&_udc_ctxt.irq_enable_timer, IRQ_ENABLE_DELAY);
+ _udc_ctxt.irq_disabled = true;
+
+ } else {
+ pr_debug("%s: Enabling interrupts\n", __func__);
+ del_timer(&_udc_ctxt.irq_enable_timer);
+ enable_irq(_udc_ctxt.irq);
+ _udc_ctxt.irq_disabled = false;
+ }
+
+ spin_unlock_irqrestore(udc->lock, flags);
+}
+
+/*
+ * enable_usb_irq_timer_func: safety-timer callback that re-enables the
+ * controller IRQ if it was left masked by msm_usb_irq_disable(true).
+ */
+static void enable_usb_irq_timer_func(unsigned long data)
+{
+ pr_debug("enabling interrupt from timer\n");
+ msm_usb_irq_disable(false);
+}
+
+/* Platform driver glue; binds against the "msm_hsusb" platform device. */
+static struct platform_driver ci13xxx_msm_driver = {
+ .probe = ci13xxx_msm_probe,
+ .driver = {
+ .name = "msm_hsusb",
+ },
+ .remove = ci13xxx_msm_remove,
+ .shutdown = ci13xxx_msm_shutdown,
+};
+MODULE_ALIAS("platform:msm_hsusb");
+
+/* Module init: register the platform driver. */
+static int __init ci13xxx_msm_init(void)
+{
+ return platform_driver_register(&ci13xxx_msm_driver);
+}
+module_init(ci13xxx_msm_init);
+
+/* Module exit: unregister the platform driver. */
+static void __exit ci13xxx_msm_exit(void)
+{
+ platform_driver_unregister(&ci13xxx_msm_driver);
+}
+module_exit(ci13xxx_msm_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
new file mode 100644
index 0000000..28aaa1f
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -0,0 +1,3983 @@
+/*
+ * ci13xxx_udc.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: MIPS USB IP core family device controller
+ * Currently it only supports IP part number CI13412
+ *
+ * This driver is composed of several blocks:
+ * - HW: hardware interface
+ * - DBG: debug facilities (optional)
+ * - UTIL: utilities
+ * - ISR: interrupts handling
+ * - ENDPT: endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS: bus glue code, bus abstraction layer
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN: non-empty bulk-in pipes cannot be halted
+ * if defined mass storage compliance succeeds but with warnings
+ * => case 4: Hi > Dn
+ * => case 5: Hi > Di
+ * => case 8: Hi <> Do
+ * if undefined usbtest 13 fails
+ * - TRACE: enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Isochronous & Interrupt Traffic
+ * - Handle requests which spawns into several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb.h>
+
+#include "ci13xxx_udc.h"
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+
+#define USB_MAX_TIMEOUT 25 /* 25msec timeout */
+#define EP_PRIME_CHECK_DELAY (jiffies + msecs_to_jiffies(1000))
+#define MAX_PRIME_CHECK_RETRY 3 /*Wait for 3sec for EP prime failure */
+#define EXTRA_ALLOCATION_SIZE 256
+
+/* ctrl register bank access */
+static DEFINE_SPINLOCK(udc_lock);
+
+/* control endpoint description */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* UDC descriptor */
+static struct ci13xxx *_udc;
+
+/* Interrupt statistics */
+#define ISR_MASK 0x1F
+static struct {
+ u32 test;
+ u32 ui;
+ u32 uei;
+ u32 pci;
+ u32 uri;
+ u32 sli;
+ u32 none;
+ struct {
+ u32 cnt;
+ u32 buf[ISR_MASK+1];
+ u32 idx;
+ } hndl;
+} isr_statistics;
+
+/**
+ * ffs_nr: find first (least significant) bit set
+ * @x: the word to search
+ *
+ * This function returns the zero-based bit number (ffs() returns a
+ * one-based position).  For x == 0 it returns 32, i.e. "no bit set".
+ */
+static int ffs_nr(u32 x)
+{
+ int n = ffs(x);
+
+ return n ? n-1 : 32;
+}
+
+/******************************************************************************
+ * HW block
+ *****************************************************************************/
+/* register bank descriptor */
+static struct {
+ unsigned int lpm; /* is LPM? */
+ void __iomem *abs; /* bus map offset */
+ void __iomem *cap; /* bus map offset + CAP offset + CAP data */
+ size_t size; /* bank size */
+} hw_bank;
+
+/* MSM specific */
+#define ABS_AHBBURST (0x0090UL)
+#define ABS_AHBMODE (0x0098UL)
+/* UDC register map */
+#define ABS_CAPLENGTH (0x100UL)
+#define ABS_HCCPARAMS (0x108UL)
+#define ABS_DCCPARAMS (0x124UL)
+#define ABS_TESTMODE (hw_bank.lpm ? 0x0FCUL : 0x138UL)
+/* offset to CAPLENGTH (addr + data) */
+#define CAP_USBCMD (0x000UL)
+#define CAP_USBSTS (0x004UL)
+#define CAP_USBINTR (0x008UL)
+#define CAP_DEVICEADDR (0x014UL)
+#define CAP_ENDPTLISTADDR (0x018UL)
+#define CAP_PORTSC (0x044UL)
+#define CAP_DEVLC (0x084UL)
+#define CAP_ENDPTPIPEID (0x0BCUL)
+#define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL)
+#define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
+#define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL)
+#define CAP_ENDPTFLUSH (hw_bank.lpm ? 0x0E0UL : 0x074UL)
+#define CAP_ENDPTSTAT (hw_bank.lpm ? 0x0E4UL : 0x078UL)
+#define CAP_ENDPTCOMPLETE (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
+#define CAP_ENDPTCTRL (hw_bank.lpm ? 0x0ECUL : 0x080UL)
+#define CAP_LAST (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
+
+#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(200)
+
+/* maximum number of endpoints: valid only after hw_device_reset() */
+static unsigned int hw_ep_max;
+static void dbg_usb_op_fail(u8 addr, const char *name,
+ const struct ci13xxx_ep *mep);
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction (0 = RX/OUT, non-zero = TX/IN)
+ *
+ * This function returns the bit number for @num in the prime/flush/
+ * status registers; TX endpoints occupy the upper 16 bits.
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+ return num + (dir ? 16 : 0);
+}
+
+/*
+ * ep_to_bit: map a logical endpoint index to its register bit.
+ *
+ * The ci13xxx_ep array packs RX endpoints first and TX after hw_ep_max/2,
+ * while the registers always place TX at bit 16; @fill bridges the gap
+ * when fewer than 16 endpoints per direction exist.
+ */
+static int ep_to_bit(int n)
+{
+ int fill = 16 - hw_ep_max / 2;
+
+ if (n >= hw_ep_max / 2)
+ n += fill;
+
+ return n;
+}
+
+/**
+ * hw_aread: reads from register bitfield
+ * @addr: address relative to bus map (ABS_* offsets)
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data (still shifted in place,
+ * not normalized to bit 0).
+ */
+static u32 hw_aread(u32 addr, u32 mask)
+{
+ return ioread32(addr + hw_bank.abs) & mask;
+}
+
+/**
+ * hw_awrite: writes to register bitfield
+ * @addr: address relative to bus map (ABS_* offsets)
+ * @mask: bitfield mask
+ * @data: new data (read-modify-write; bits outside @mask are preserved)
+ */
+static void hw_awrite(u32 addr, u32 mask, u32 data)
+{
+ iowrite32(hw_aread(addr, ~mask) | (data & mask),
+ addr + hw_bank.abs);
+}
+
+/**
+ * hw_cread: reads from register bitfield
+ * @addr: address relative to CAP offset plus content (CAP_* offsets)
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_cread(u32 addr, u32 mask)
+{
+ return ioread32(addr + hw_bank.cap) & mask;
+}
+
+/**
+ * hw_cwrite: writes to register bitfield
+ * @addr: address relative to CAP offset plus content (CAP_* offsets)
+ * @mask: bitfield mask
+ * @data: new data (read-modify-write; bits outside @mask are preserved)
+ */
+static void hw_cwrite(u32 addr, u32 mask, u32 data)
+{
+ iowrite32(hw_cread(addr, ~mask) | (data & mask),
+ addr + hw_bank.cap);
+}
+
+/**
+ * hw_ctest_and_clear: tests & clears register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data.  Writing the read bits
+ * back clears them — intended for write-1-to-clear status registers.
+ */
+static u32 hw_ctest_and_clear(u32 addr, u32 mask)
+{
+ u32 reg = hw_cread(addr, mask);
+
+ iowrite32(reg, addr + hw_bank.cap);
+ return reg;
+}
+
+/**
+ * hw_ctest_and_write: tests & writes register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * This function returns the previous bitfield value, normalized to
+ * bit 0 via ffs_nr(@mask).
+ */
+static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
+{
+ u32 reg = hw_cread(addr, ~0);
+
+ iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
+ return (reg & mask) >> ffs_nr(mask);
+}
+
+/*
+ * hw_device_init: discover the register bank layout and endpoint count.
+ * @base: ioremapped controller base address
+ *
+ * Populates the module-level hw_bank descriptor (CAP base follows the
+ * CAPLENGTH byte), detects LPM-capable banks from HCCPARAMS, and caches
+ * hw_ep_max (DCCPARAMS device endpoints x2 directions).
+ *
+ * Returns 0, or -ENODEV if the reported endpoint count is unusable.
+ */
+static int hw_device_init(void __iomem *base)
+{
+ u32 reg;
+
+ /* bank is a module variable */
+ hw_bank.abs = base;
+
+ hw_bank.cap = hw_bank.abs;
+ hw_bank.cap += ABS_CAPLENGTH;
+ hw_bank.cap += ioread8(hw_bank.cap);
+
+ reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
+ hw_bank.lpm = reg;
+ hw_bank.size = hw_bank.cap - hw_bank.abs;
+ hw_bank.size += CAP_LAST;
+ hw_bank.size /= sizeof(u32); /* size is in 32-bit registers */
+
+ reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
+ hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */
+
+ if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+ return -ENODEV;
+
+ /* setup lock mode ? */
+
+ /* ENDPTSETUPSTAT is '0' by default */
+
+ /* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+ return 0;
+}
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @udc: UDC descriptor (used for driver flags and the reset notifier)
+ *
+ * Flushes and stops the controller, issues a controller reset (polled
+ * for up to 250 usec), notifies the glue layer, then programs device
+ * mode, setup-lockout mode and the interrupt threshold (ITC).
+ *
+ * This function returns an error code (-ENODEV if device mode could
+ * not be entered).
+ */
+static int hw_device_reset(struct ci13xxx *udc)
+{
+ int delay_count = 25; /* 250 usec */
+
+ /* should flush & stop before reset */
+ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+ hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+ while (delay_count-- && hw_cread(CAP_USBCMD, USBCMD_RST))
+ udelay(10);
+ if (delay_count < 0)
+ pr_err("USB controller reset failed\n");
+
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_RESET_EVENT);
+
+ /* USBMODE should be configured step by step */
+ hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+ hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+ hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */
+
+ /*
+ * ITC (Interrupt Threshold Control) field is to set the maximum
+ * rate at which the device controller will issue interrupts.
+ * The maximum interrupt interval measured in micro frames.
+ * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
+ * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
+ * can be set to lesser value to gain performance.
+ */
+ if (udc->udc_driver->nz_itc)
+ hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK,
+ USBCMD_ITC(udc->udc_driver->nz_itc));
+ else if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
+ hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
+
+ if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+ pr_err("cannot enter in device mode");
+ pr_err("lpm = %i", hw_bank.lpm);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts & starts/stops device (execute
+ * without interruption)
+ * @dma: 0 => disable, !0 => enable and set dma engine (ENDPTLISTADDR)
+ *
+ * On enable: programs streaming mode per driver flags, points the
+ * controller at the endpoint list, optionally enables AHB2AHB bypass,
+ * unmasks the core interrupts and sets Run/Stop.  On disable: clears
+ * Run/Stop, masks all interrupts and drops the bypass bit.
+ *
+ * This function returns an error code (always 0 today).
+ */
+static int hw_device_state(u32 dma)
+{
+ struct ci13xxx *udc = _udc;
+
+ if (dma) {
+ if (!(udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)) {
+ hw_cwrite(CAP_USBMODE, USBMODE_SDIS, 0);
+ pr_debug("%s(): streaming mode is enabled. USBMODE:%x\n",
+ __func__, hw_cread(CAP_USBMODE, ~0));
+
+ } else {
+ hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
+ pr_debug("%s(): streaming mode is disabled. USBMODE:%x\n",
+ __func__, hw_cread(CAP_USBMODE, ~0));
+ }
+
+ hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
+
+
+ /* Set BIT(31) to enable AHB2AHB Bypass functionality */
+ if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
+ hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, AHB2AHB_BYPASS);
+ pr_debug("%s(): ByPass Mode is enabled. AHBMODE:%x\n",
+ __func__, hw_aread(ABS_AHBMODE, ~0));
+ }
+
+ /* interrupt, error, port change, reset, sleep/suspend */
+ hw_cwrite(CAP_USBINTR, ~0,
+ USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+ } else {
+ hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+ hw_cwrite(CAP_USBINTR, ~0, 0);
+ /* Clear BIT(31) to disable AHB2AHB Bypass functionality */
+ if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
+ hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, 0);
+ pr_debug("%s(): ByPass Mode is disabled. AHBMODE:%x\n",
+ __func__, hw_aread(ABS_AHBMODE, ~0));
+ }
+ }
+ return 0;
+}
+
+/*
+ * debug_ept_flush_info: dump controller state after an endpoint flush
+ * timeout (called from hw_ep_flush()).  Rate-limited since flush
+ * failures can repeat quickly.
+ */
+static void debug_ept_flush_info(int ep_num, int dir)
+{
+ struct ci13xxx *udc = _udc;
+ struct ci13xxx_ep *mep;
+
+ /* TX endpoints live in the upper half of the ci13xxx_ep array */
+ if (dir)
+ mep = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+ else
+ mep = &udc->ci13xxx_ep[ep_num];
+
+ pr_err_ratelimited("USB Registers\n");
+ pr_err_ratelimited("USBCMD:%x\n", hw_cread(CAP_USBCMD, ~0));
+ pr_err_ratelimited("USBSTS:%x\n", hw_cread(CAP_USBSTS, ~0));
+ pr_err_ratelimited("ENDPTLISTADDR:%x\n",
+ hw_cread(CAP_ENDPTLISTADDR, ~0));
+ pr_err_ratelimited("PORTSC:%x\n", hw_cread(CAP_PORTSC, ~0));
+ pr_err_ratelimited("USBMODE:%x\n", hw_cread(CAP_USBMODE, ~0));
+ pr_err_ratelimited("ENDPTSTAT:%x\n", hw_cread(CAP_ENDPTSTAT, ~0));
+
+ dbg_usb_op_fail(0xFF, "FLUSHF", mep);
+}
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * Repeatedly writes ENDPTFLUSH and waits for the bit to clear, bounded
+ * by USB_MAX_TIMEOUT per attempt.  On timeout it dumps state, sets
+ * skip_flush so further flushes become no-ops, and asks the glue layer
+ * to schedule hardware reset recovery.
+ *
+ * This function returns an error code (always 0; failure is reported
+ * via the notify_event recovery path instead)
+ */
+static int hw_ep_flush(int num, int dir)
+{
+ ktime_t start, diff;
+ int n = hw_ep_bit(num, dir);
+ struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
+
+ /* Flush ep0 even when queue is empty */
+ if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
+ return 0;
+
+ start = ktime_get();
+ do {
+ /* flush any pending transfer */
+ hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
+ while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) {
+ cpu_relax();
+ diff = ktime_sub(ktime_get(), start);
+ if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
+ printk_ratelimited(KERN_ERR
+ "%s: Failed to flush ep#%d %s\n",
+ __func__, num,
+ dir ? "IN" : "OUT");
+ debug_ept_flush_info(num, dir);
+ _udc->skip_flush = true;
+ /* Notify to trigger h/w reset recovery later */
+ if (_udc->udc_driver->notify_event)
+ _udc->udc_driver->notify_event(_udc,
+ CI13XXX_CONTROLLER_ERROR_EVENT);
+ return 0;
+ }
+ }
+ } while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
+
+ return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction (clears TXE or RXE accordingly)
+ *
+ * This function returns an error code (always 0)
+ */
+static int hw_ep_disable(int num, int dir)
+{
+ hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
+ dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+ return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @type: endpoint type (USB_ENDPOINT_XFER_*)
+ *
+ * Programs type, unstalls, resets the data toggle and enables the
+ * endpoint in a single ENDPTCTRL read-modify-write.
+ *
+ * This function returns an error code (always 0)
+ */
+static int hw_ep_enable(int num, int dir, int type)
+{
+ u32 mask, data;
+
+ if (dir) {
+ mask = ENDPTCTRL_TXT; /* type */
+ data = type << ffs_nr(mask);
+
+ mask |= ENDPTCTRL_TXS; /* unstall */
+ mask |= ENDPTCTRL_TXR; /* reset data toggle */
+ data |= ENDPTCTRL_TXR;
+ mask |= ENDPTCTRL_TXE; /* enable */
+ data |= ENDPTCTRL_TXE;
+ } else {
+ mask = ENDPTCTRL_RXT; /* type */
+ data = type << ffs_nr(mask);
+
+ mask |= ENDPTCTRL_RXS; /* unstall */
+ mask |= ENDPTCTRL_RXR; /* reset data toggle */
+ data |= ENDPTCTRL_RXR;
+ mask |= ENDPTCTRL_RXE; /* enable */
+ data |= ENDPTCTRL_RXE;
+ }
+ hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+
+ /* make sure endpoint is enabled before returning */
+ mb();
+
+ return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted (stall bit set), else 0
+ */
+static int hw_ep_get_halt(int num, int dir)
+{
+ u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+ return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ * interruption)
+ * @n: endpoint number (logical index, remapped via ep_to_bit())
+ *
+ * This function returns setup status (non-zero if a SETUP was pending)
+ */
+static int hw_test_and_clear_setup_status(int n)
+{
+ n = ep_to_bit(n);
+ return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * For a control OUT endpoint, a pending SETUP packet (before or after
+ * the prime) aborts with -EAGAIN so the caller can re-handle it.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_prime(int num, int dir, int is_ctrl)
+{
+ int n = hw_ep_bit(num, dir);
+
+ if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+ return -EAGAIN;
+
+ hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+
+ if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+ return -EAGAIN;
+
+ /* status should be tested according with manual but it doesn't work */
+ return 0;
+}
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ * without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * Loops until the stall bit reads back as requested; bails out early
+ * (returning 0) if a SETUP packet arrives for @num meanwhile.
+ *
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(int num, int dir, int value)
+{
+ u32 addr, mask_xs, mask_xr;
+
+ if (value != 0 && value != 1)
+ return -EINVAL;
+
+ do {
+ if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+ return 0;
+
+ addr = CAP_ENDPTCTRL + num * sizeof(u32);
+ mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+ mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+ /* data toggle - reserved for EP0 but it's in ESS */
+ hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
+
+ } while (value != hw_ep_get_halt(num, dir));
+
+ return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit (must be < REG_BITS)
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(int n)
+{
+ if (n >= REG_BITS)
+ return -EINVAL;
+
+ hw_cwrite(CAP_USBINTR, BIT(n), 0);
+ hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
+ return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit (must be < REG_BITS)
+ *
+ * Briefly enters test-force mode so the status write actually latches.
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(int n)
+{
+ if (n >= REG_BITS)
+ return -EINVAL;
+
+ hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+ hw_cwrite(CAP_USBINTR, BIT(n), BIT(n));
+ hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
+ hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
+ return 0;
+}
+
+/**
+ * hw_port_is_high_speed: test if port is high speed
+ *
+ * Reads DEVLC on LPM-capable banks, PORTSC otherwise.
+ *
+ * This function returns non-zero for a high speed port
+ */
+static int hw_port_is_high_speed(void)
+{
+ return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
+ hw_cread(CAP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value (PORTSC.PTC, bit 0 based)
+ */
+static u8 hw_port_test_get(void)
+{
+ return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value (0..7 per the PTC field width)
+ *
+ * This function returns an error code
+ */
+static int hw_port_test_set(u8 mode)
+{
+ const u8 TEST_MODE_MAX = 7;
+
+ if (mode > TEST_MODE_MAX)
+ return -EINVAL;
+
+ hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
+ return 0;
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register (USBINTR)
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_enable(void)
+{
+ return hw_cread(CAP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register (USBSTS)
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_status(void)
+{
+ return hw_cread(CAP_USBSTS, ~0);
+}
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf: destination buffer
+ * @size: buffer size in 32-bit words (clamped to the bank size)
+ *
+ * This function returns number of registers read
+ */
+static size_t hw_register_read(u32 *buf, size_t size)
+{
+ unsigned int i;
+
+ if (size > hw_bank.size)
+ size = hw_bank.size;
+
+ for (i = 0; i < size; i++)
+ buf[i] = hw_aread(i * sizeof(u32), ~0);
+
+ return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address (byte offset; rounded down to word alignment)
+ * @data: register value
+ *
+ * This function returns an error code (-EINVAL if out of the bank)
+ */
+static int hw_register_write(u16 addr, u32 data)
+{
+ /* align */
+ addr /= sizeof(u32);
+
+ if (addr >= hw_bank.size)
+ return -EINVAL;
+
+ /* align */
+ addr *= sizeof(u32);
+
+ hw_awrite(addr, ~0, data);
+ return 0;
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ * interruption)
+ * @n: endpoint number (logical index, remapped via ep_to_bit())
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(int n)
+{
+ n = ep_to_bit(n);
+ return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ * without interruption)
+ *
+ * This function returns the active interrupts (status masked by the
+ * enable register), clearing them in USBSTS as it reads
+ */
+static u32 hw_test_and_clear_intr_active(void)
+{
+ u32 reg = hw_read_intr_status() & hw_read_intr_enable();
+
+ hw_cwrite(CAP_USBSTS, ~0, reg);
+ return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ * interruption)
+ *
+ * This function returns the previous guard value (USBCMD.SUTW)
+ */
+static int hw_test_and_clear_setup_guard(void)
+{
+ return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ * interruption)
+ *
+ * This function returns the previous guard value (USBCMD.SUTW)
+ */
+static int hw_test_and_set_setup_guard(void)
+{
+ return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @value: new USB address
+ *
+ * USBADRA ("advance") makes the new address take effect only after the
+ * status stage of the current control transfer completes.
+ *
+ * This function returns an error code (always 0)
+ */
+static int hw_usb_set_address(u8 value)
+{
+ /* advance */
+ hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
+ value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
+ return 0;
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ * interruption)
+ *
+ * Clears the device address, flushes all endpoints, write-clears the
+ * complete status and waits (bounded, ~100 usec) for ENDPTPRIME to
+ * drain.
+ *
+ * This function returns an error code (always 0)
+ */
+static int hw_usb_reset(void)
+{
+ int delay_count = 10; /* 100 usec delay */
+
+ hw_usb_set_address(0);
+
+ /* ESS flushes only at end?!? */
+ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); /* flush all EPs */
+
+ /* clear complete status */
+ hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0); /* writes its content */
+
+ /* wait until all bits cleared */
+ while (delay_count-- && hw_cread(CAP_ENDPTPRIME, ~0))
+ udelay(10);
+ if (delay_count < 0)
+ pr_err("ENDPTPRIME is not cleared during bus reset\n");
+
+ /* reset all endpoints ? */
+
+ /*
+ * reset internal status and wait for further instructions
+ * no need to verify the port reset status (ESS does it)
+ */
+
+ return 0;
+}
+
+/******************************************************************************
+ * DBG block
+ *****************************************************************************/
+/**
+ * show_device: prints information about device capabilities and status
+ * (sysfs "device" attribute, read-only)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ struct usb_gadget *gadget = &udc->gadget;
+ int n = 0;
+
+ dbg_trace("[%s] %pK\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
+ gadget->speed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
+ gadget->max_speed);
+ /* TODO: Scheduled for removal in 3.8. */
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
+ gadget_is_dualspeed(gadget));
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
+ gadget->is_otg);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
+ gadget->is_a_peripheral);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n",
+ gadget->b_hnp_enable);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n",
+ gadget->a_hnp_support);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+ gadget->a_alt_hnp_support);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n",
+ (gadget->name ? gadget->name : ""));
+
+ return n;
+}
+static DEVICE_ATTR(device, 0400, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ * (sysfs "driver" attribute, read-only)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+ struct usb_gadget_driver *driver = udc->driver;
+ int n = 0;
+
+ dbg_trace("[%s] %pK\n", __func__, buf);
+ if (attr == NULL || buf == NULL) {
+ dev_err(dev, "[%s] EINVAL\n", __func__);
+ return 0;
+ }
+
+ if (driver == NULL)
+ return scnprintf(buf, PAGE_SIZE,
+ "There is no gadget attached!\n");
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
+ (driver->function ? driver->function : ""));
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+ driver->max_speed);
+
+ return n;
+}
+static DEVICE_ATTR(driver, 0400, show_driver, NULL);
+
+/* Maximum event message length */
+#define DBG_DATA_MSG 64UL
+
+/* Maximum event messages */
+#define DBG_DATA_MAX 128UL
+
+/* Event buffer descriptor */
+static struct {
+ char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */
+ unsigned int idx; /* index */
+ unsigned int tty; /* print to console? */
+ rwlock_t lck; /* lock */
+} dbg_data = {
+ .idx = 0,
+ .tty = 0,
+ .lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index (wraps modulo DBG_DATA_MAX, a power of two)
+ */
+static void dbg_dec(unsigned int *idx)
+{
+ *idx = (*idx - 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index (wraps modulo DBG_DATA_MAX, a power of two)
+ */
+static void dbg_inc(unsigned int *idx)
+{
+ *idx = (*idx + 1) & (DBG_DATA_MAX-1);
+}
+
+
+static unsigned int ep_addr_txdbg_mask;
+module_param(ep_addr_txdbg_mask, uint, 0644);
+static unsigned int ep_addr_rxdbg_mask;
+module_param(ep_addr_rxdbg_mask, uint, 0644);
+
+/*
+ * allow_dbg_print: per-endpoint debug-trace filter.
+ * @addr: endpoint address (0xff means a bus-wide event, always allowed)
+ *
+ * Returns 1 when the endpoint's bit is set in the direction-specific
+ * module parameter mask (ep_addr_txdbg_mask / ep_addr_rxdbg_mask).
+ */
+static int allow_dbg_print(u8 addr)
+{
+ int dir, num;
+
+ /* allow bus wide events */
+ if (addr == 0xff)
+ return 1;
+
+ dir = addr & USB_ENDPOINT_DIR_MASK ? TX : RX;
+ num = addr & ~USB_ENDPOINT_DIR_MASK;
+ num = 1 << num;
+
+ if ((dir == TX) && (num & ep_addr_txdbg_mask))
+ return 1;
+ if ((dir == RX) && (num & ep_addr_rxdbg_mask))
+ return 1;
+
+ return 0;
+}
+
+#define TIME_BUF_LEN 20
+/*
+ * get_timestamp - format the current CPU clock into @tbuf as
+ * "[sec.usec] " and return @tbuf (caller supplies TIME_BUF_LEN bytes).
+ */
+static char *get_timestamp(char *tbuf)
+{
+ unsigned long long t;
+ unsigned long nanosec_rem;
+
+ t = cpu_clock(smp_processor_id());
+ nanosec_rem = do_div(t, 1000000000)/1000; /* remainder ns -> us */
+ scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
+ nanosec_rem);
+ return tbuf;
+}
+
+/**
+ * dbg_print: prints the common part of the event
+ * @addr: endpoint address (filtered by allow_dbg_print())
+ * @name: event name
+ * @status: status
+ * @extra: extra information
+ *
+ * Appends one timestamped record to the dbg_data ring buffer under its
+ * rwlock; optionally mirrors it to the console when dbg_data.tty is set.
+ */
+static void dbg_print(u8 addr, const char *name, int status, const char *extra)
+{
+ unsigned long flags;
+ char tbuf[TIME_BUF_LEN];
+
+ if (!allow_dbg_print(addr))
+ return;
+
+ write_lock_irqsave(&dbg_data.lck, flags);
+
+ scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
+ "%s\t? %02X %-7.7s %4i ?\t%s\n",
+ get_timestamp(tbuf), addr, name, status, extra);
+
+ dbg_inc(&dbg_data.idx);
+
+ write_unlock_irqrestore(&dbg_data.lck, flags);
+
+ if (dbg_data.tty != 0)
+ pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n",
+ get_timestamp(tbuf), addr, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr: endpoint address
+ * @token: TD token word (total-bytes and status fields are decoded)
+ * @status: status
+ */
+static void dbg_done(u8 addr, const u32 token, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	scnprintf(msg, sizeof(msg), "%d %02X",
+		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
+		  (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS));
+	dbg_print(addr, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr: endpoint address
+ * @name: event name (NULL names are silently ignored)
+ * @status: status
+ */
+static void dbg_event(u8 addr, const char *name, int status)
+{
+	if (name != NULL)
+		dbg_print(addr, name, status, "");
+}
+
+/*
+ * dbg_queue: prints a QUEUE event
+ * @addr: endpoint address
+ * @req: USB request (interrupt-enable flag and length are logged;
+ *       NULL requests are ignored)
+ * @status: status
+ */
+static void dbg_queue(u8 addr, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%d %d", !req->no_interrupt, req->length);
+		dbg_print(addr, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req: setup request (all five fields are logged, multi-byte fields
+ *       converted from little-endian; NULL requests are ignored)
+ */
+static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%02X %02X %04X %04X %d", req->bRequestType,
+			  req->bRequest, le16_to_cpu(req->wValue),
+			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(addr, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * dbg_usb_op_fail: prints USB Operation FAIL event
+ * @addr: endpoint address
+ * @name: name of the failed operation (also used as the event label)
+ * @mep: endpoint structure; its queue head and every queued TD are dumped
+ */
+static void dbg_usb_op_fail(u8 addr, const char *name,
+	const struct ci13xxx_ep *mep)
+{
+	char msg[DBG_DATA_MSG];
+	struct ci13xxx_req *req;
+	struct list_head *ptr = NULL;
+
+	if (mep != NULL) {
+		/* Summary line plus queue-head snapshot */
+		scnprintf(msg, sizeof(msg),
+			  "%s Fail EP%d%s QH:%08X",
+			  name, mep->num,
+			  mep->dir ? "IN" : "OUT", mep->qh.ptr->cap);
+		dbg_print(addr, name, 0, msg);
+		scnprintf(msg, sizeof(msg),
+			  "cap:%08X %08X %08X\n",
+			  mep->qh.ptr->curr, mep->qh.ptr->td.next,
+			  mep->qh.ptr->td.token);
+		dbg_print(addr, "QHEAD", 0, msg);
+
+		/* One REQ/REQPAGE pair per queued transfer descriptor */
+		list_for_each(ptr, &mep->qh.queue) {
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+			scnprintf(msg, sizeof(msg),
+				  "%pKa:%08X:%08X\n",
+				  &req->dma, req->ptr->next,
+				  req->ptr->token);
+			dbg_print(addr, "REQ", 0, msg);
+			scnprintf(msg, sizeof(msg), "%08X:%d\n",
+				  req->ptr->page[0],
+				  req->req.status);
+			dbg_print(addr, "REQPAGE", 0, msg);
+		}
+	}
+}
+
+/**
+ * show_events: displays the event buffer
+ *
+ * Walks the ring backwards from the newest entry to determine how many
+ * whole messages fit into one page, then emits them oldest-first.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_events(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	unsigned long flags;
+	unsigned int i, j, n = 0;
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	read_lock_irqsave(&dbg_data.lck, flags);
+
+	/* Pass 1: step back from the write index, accumulating lengths,
+	 * until adding one more message would exceed PAGE_SIZE (or we wrap
+	 * all the way around). 'i' ends on the oldest entry NOT emitted. */
+	i = dbg_data.idx;
+	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
+		n += strlen(dbg_data.buf[i]);
+		if (n >= PAGE_SIZE) {
+			n -= strlen(dbg_data.buf[i]);
+			break;
+		}
+	}
+	/* Pass 2: emit from the first entry that fits, oldest to newest. */
+	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
+		j += scnprintf(buf + j, PAGE_SIZE - j,
+			       "%s", dbg_data.buf[i]);
+
+	read_unlock_irqrestore(&dbg_data.lck, flags);
+
+	return n;
+}
+
+/**
+ * store_events: configure if events are going to be also printed to console
+ *
+ * Accepts "1" or "0" to enable/disable mirroring of debug events to the
+ * console. Always returns @count so the write is not retried, even when
+ * the input is rejected.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_events(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned int tty;
+
+	/* NOTE(review): %d used with a size_t argument; %zu would be the
+	 * correct conversion for 'count'. */
+	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (kstrtouint(buf, 10, &tty) || tty > 1) {
+		dev_err(dev, "<1|0>: enable|disable console log\n");
+		goto done;
+	}
+
+	dbg_data.tty = tty;
+	dev_info(dev, "tty = %u", dbg_data.tty);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(events, 0600, show_events, store_events);
+
+/**
+ * show_inters: interrupt status, enable status and historic
+ *
+ * Dumps the current interrupt status/enable registers, the per-source ISR
+ * statistics counters, and then decodes the circular history buffer of
+ * handled interrupt words (isr_statistics.hndl).
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 intr;
+	unsigned int i, j, n = 0;
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "status = %08x\n", hw_read_intr_status());
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "enable = %08x\n", hw_read_intr_enable());
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
+		       isr_statistics.test);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
+		       isr_statistics.ui);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
+		       isr_statistics.uei);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
+		       isr_statistics.pci);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
+		       isr_statistics.uri);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
+		       isr_statistics.sli);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
+		       isr_statistics.none);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
+		       isr_statistics.hndl.cnt);
+
+	/* Decode each recorded interrupt word, oldest first; clear each
+	 * known bit as it is printed so any residue shows up as "???". */
+	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
+		i   &= ISR_MASK;
+		intr = isr_statistics.hndl.buf[i];
+
+		if (USBi_UI  & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
+		intr &= ~USBi_UI;
+		if (USBi_UEI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
+		intr &= ~USBi_UEI;
+		if (USBi_PCI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
+		intr &= ~USBi_PCI;
+		if (USBi_URI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
+		intr &= ~USBi_URI;
+		if (USBi_SLI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
+		intr &= ~USBi_SLI;
+		if (intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
+		if (isr_statistics.hndl.buf[i])
+			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	}
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+
+/**
+ * store_inters: enable & force or disable an individual interrutps
+ * (to be used for test purposes only)
+ *
+ * Input format: "<1|0> <bit>" — 1 forces the interrupt (and bumps the test
+ * counter), 0 clears it. Always returns @count, even on rejected input.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned int en, bit;
+
+	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
+		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (en) {
+		if (hw_intr_force(bit))
+			dev_err(dev, "invalid bit number\n");
+		else
+			isr_statistics.test++;
+	} else {
+		if (hw_intr_clear(bit))
+			dev_err(dev, "invalid bit number\n");
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(inters, 0600, show_inters, store_inters);
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Reads the current hardware port-test mode under the controller lock and
+ * reports it as "mode = <n>".
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned int mode;
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	mode = hw_port_test_get();
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Parses a decimal mode number and applies it via hw_port_test_set()
+ * under the controller lock. Always returns @count, even on bad input or
+ * when the hardware rejects the mode.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned int mode;
+
+	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (kstrtouint(buf, 10, &mode)) {
+		dev_err(dev, "<mode>: set port test mode");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_port_test_set(mode))
+		dev_err(dev, "invalid mode\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(port_test, 0600, show_port_test, store_port_test);
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * For each endpoint pair (RX endpoints occupy indices [0, hw_ep_max/2),
+ * the matching TX endpoint sits hw_ep_max/2 higher) the queue-head memory
+ * is dumped word by word.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned int i, j, n = 0;
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
+
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "EP=%02i: RX=%08X TX=%08X\n",
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
+		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       " %04X:    %08X    %08X\n", j,
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
+		}
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(qheads, 0400, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Snapshots up to DUMP_ENTRIES register words into a temporary buffer
+ * under the controller lock, then formats them outside the lock. Output
+ * is silently truncated to one page by scnprintf's bound.
+ *
+ * Check "device.h" for details
+ */
+#define DUMP_ENTRIES	512
+static ssize_t show_registers(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 *dump;
+	unsigned int i, k, n = 0;
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
+	if (!dump)
+		return 0;
+
+	spin_lock_irqsave(udc->lock, flags);
+	k = hw_register_read(dump, DUMP_ENTRIES);
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	for (i = 0; i < k; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "reg[0x%04X] = 0x%08X\n",
+			       i * (unsigned int)sizeof(u32), dump[i]);
+	}
+	kfree(dump);
+
+	return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Input format: "<addr> <data>" (any base accepted by %li). The write is
+ * performed under the controller lock; an out-of-range address is logged
+ * but @count is returned regardless.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long addr, data, flags;
+
+	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+		dev_err(dev, "<addr> <data>: write data to register address");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_register_write(addr, data))
+		dev_err(dev, "invalid address range\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(registers, 0600, show_registers, store_registers);
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Walks every endpoint's queue-head list and dumps each queued TD word by
+ * word. Endpoint indices [0, hw_ep_max/2) are RX, the remainder TX, so
+ * the user-visible endpoint number is the index modulo hw_ep_max/2.
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	unsigned int i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+	dbg_trace("[%s] %pK\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++)
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			/* '%' and '/' share precedence and bind left to
+			 * right, so the original "i % hw_ep_max/2" printed
+			 * i/2 instead of the endpoint number; parenthesize
+			 * the divisor to fix the reported EP. */
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "EP=%02i: TD=%08X %s\n",
+				       i % (hw_ep_max/2), (u32)req->dma,
+				       ((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+					       " %04X:    %08X\n", j,
+					       *((u32 *)req->ptr + j));
+		}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(requests, 0400, show_requests, NULL);
+
+/* EP# and Direction */
+/* prime_ept - debug hook: manually re-prime an endpoint.
+ * Input format: "<ep_num> <dir>" (dir nonzero selects the TX half of the
+ * endpoint array). Points the QH at the first queued TD, clears its status,
+ * sets the prime bit and busy-waits until the controller consumes it.
+ * NOTE(review): ep_num is not range-checked against hw_ep_max — confirm
+ * this root-only (0200) attribute is acceptable without validation.
+ */
+static ssize_t prime_ept(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct ci13xxx_ep *mEp;
+	unsigned int ep_num, dir;
+	int n;
+	struct ci13xxx_req *mReq = NULL;
+
+	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
+		dev_err(dev, "<ep_num> <dir>: prime the ep");
+		goto done;
+	}
+
+	if (dir)
+		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+	else
+		mEp = &udc->ci13xxx_ep[ep_num];
+
+	n = hw_ep_bit(mEp->num, mEp->dir);
+	mReq =  list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
+	mEp->qh.ptr->td.next   = mReq->dma;
+	mEp->qh.ptr->td.token &= ~TD_STATUS;
+
+	/* Makes sure that above write goes through */
+	wmb();
+
+	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		cpu_relax();
+
+	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
+			hw_cread(CAP_ENDPTPRIME, ~0),
+			hw_cread(CAP_ENDPTSTAT, ~0),
+			mEp->num,  mEp->dir ? "IN" : "OUT");
+done:
+	return count;
+
+}
+static DEVICE_ATTR(prime, 0200, NULL, prime_ept);
+
+/* EP# and Direction */
+/* print_dtds - debug hook: dump the dTD chain of one endpoint.
+ * Input format: "<ep_num> <dir>" (dir nonzero selects the TX half of the
+ * endpoint array). Prints prime/status registers, the per-EP failure
+ * counters, the queue head and every queued dTD.
+ * NOTE(review): like prime_ept, ep_num is not range-checked.
+ */
+static ssize_t print_dtds(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct ci13xxx_ep *mEp;
+	unsigned int ep_num, dir;
+	int n;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+
+	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
+		dev_err(dev, "<ep_num> <dir>: to print dtds");
+		goto done;
+	}
+
+	if (dir)
+		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
+	else
+		mEp = &udc->ci13xxx_ep[ep_num];
+
+	n = hw_ep_bit(mEp->num, mEp->dir);
+	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s dTD_update_fail_count: %lu mEp->dTD_update_fail_count: %lu mEp->dTD_active_re_q_count: %lu mEp->prime_fail_count: %lu\n",
+			__func__,
+			hw_cread(CAP_ENDPTPRIME, ~0),
+			hw_cread(CAP_ENDPTSTAT, ~0),
+			mEp->num, mEp->dir ? "IN" : "OUT",
+			udc->dTD_update_fail_count,
+			mEp->dTD_update_fail_count,
+			mEp->dTD_active_re_q_count,
+			mEp->prime_fail_count);
+
+	pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
+			mEp->qh.ptr->cap, mEp->qh.ptr->curr,
+			mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);
+
+	list_for_each(ptr, &mEp->qh.queue) {
+		req = list_entry(ptr, struct ci13xxx_req, queue);
+
+		pr_info("\treq:%pKa next:%08x token:%08x page0:%08x status:%d\n",
+				&req->dma, req->ptr->next, req->ptr->token,
+				req->ptr->page[0], req->req.status);
+	}
+done:
+	return count;
+
+}
+static DEVICE_ATTR(dtds, 0200, NULL, print_dtds);
+
+/* ci13xxx_wakeup - usb_gadget_ops.wakeup: signal remote wakeup to the host.
+ * Fails with -EOPNOTSUPP when remote wakeup is not enabled and -EINVAL
+ * when the port is not actually suspended. The controller lock is dropped
+ * around the pm/notify/PHY calls (which may sleep) and re-taken before
+ * touching PORTSC; the 'out' label unlocks, so both early-exit paths and
+ * the success path leave with the lock released.
+ */
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	trace();
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!udc->gadget.remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		dbg_trace("remote wakeup feature is not enabled\n");
+		goto out;
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	pm_runtime_get_sync(&_gadget->dev);
+
+	udc->udc_driver->notify_event(udc,
+		CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT);
+
+	if (udc->transceiver)
+		usb_phy_set_suspend(udc->transceiver, 0);
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		dbg_trace("port is not suspended\n");
+		pm_runtime_put(&_gadget->dev);
+		goto out;
+	}
+	/* Force Port Resume: tells the host we want to wake the bus */
+	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+
+	pm_runtime_mark_last_busy(&_gadget->dev);
+	pm_runtime_put_autosuspend(&_gadget->dev);
+out:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return ret;
+}
+
+/* usb_do_remote_wakeup - deferred remote-wakeup worker.
+ * Runs from the rw_work delayed work item; re-checks under the lock that
+ * the bus is still suspended and remote wakeup still enabled before
+ * issuing the wakeup, since the work cannot be cancelled from the IRQ
+ * handler that might have resumed the bus in the meantime.
+ */
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	bool do_wake;
+
+	/*
+	 * This work can not be canceled from interrupt handler. Check
+	 * if wakeup conditions are still met.
+	 */
+	spin_lock_irqsave(udc->lock, flags);
+	do_wake = udc->suspended && udc->gadget.remote_wakeup;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (do_wake)
+		ci13xxx_wakeup(&udc->gadget);
+}
+
+/* usb_remote_wakeup - sysfs trigger: any write requests a remote wakeup.
+ * The result of ci13xxx_wakeup() is intentionally ignored; @count is
+ * always returned so the writer is not retried.
+ */
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+
+	ci13xxx_wakeup(&udc->gadget);
+
+	return count;
+}
+/* write-only attribute: use NULL (not a bare 0) for the missing show
+ * callback — a plain integer as a null pointer draws a sparse warning */
+static DEVICE_ATTR(wakeup, 0200, NULL, usb_remote_wakeup);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * Creates all debug sysfs attributes. On any failure, every attribute
+ * created so far is removed again: each unwind label removes one
+ * successfully created file and falls through to the next, so a failing
+ * step jumps to the label for the step *before* it.
+ *
+ * This function returns an error code
+ */
+static int __maybe_unused dbg_create_files(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+	retval = device_create_file(dev, &dev_attr_device);
+	if (retval)
+		goto done;
+	retval = device_create_file(dev, &dev_attr_driver);
+	if (retval)
+		goto rm_device;
+	retval = device_create_file(dev, &dev_attr_events);
+	if (retval)
+		goto rm_driver;
+	retval = device_create_file(dev, &dev_attr_inters);
+	if (retval)
+		goto rm_events;
+	retval = device_create_file(dev, &dev_attr_port_test);
+	if (retval)
+		goto rm_inters;
+	retval = device_create_file(dev, &dev_attr_qheads);
+	if (retval)
+		goto rm_port_test;
+	retval = device_create_file(dev, &dev_attr_registers);
+	if (retval)
+		goto rm_qheads;
+	retval = device_create_file(dev, &dev_attr_requests);
+	if (retval)
+		goto rm_registers;
+	retval = device_create_file(dev, &dev_attr_wakeup);
+	if (retval)
+		goto rm_requests;
+	retval = device_create_file(dev, &dev_attr_prime);
+	if (retval)
+		goto rm_wakeup;
+	retval = device_create_file(dev, &dev_attr_dtds);
+	if (retval)
+		goto rm_prime;
+
+	return 0;
+
+/*
+ * Unwind in strict reverse creation order. The previous version jumped to
+ * labels that removed the attribute whose creation had just failed (a
+ * no-op removal) and never removed dev_attr_requests on late failures.
+ */
+rm_prime:
+	device_remove_file(dev, &dev_attr_prime);
+rm_wakeup:
+	device_remove_file(dev, &dev_attr_wakeup);
+rm_requests:
+	device_remove_file(dev, &dev_attr_requests);
+rm_registers:
+	device_remove_file(dev, &dev_attr_registers);
+rm_qheads:
+	device_remove_file(dev, &dev_attr_qheads);
+rm_port_test:
+	device_remove_file(dev, &dev_attr_port_test);
+rm_inters:
+	device_remove_file(dev, &dev_attr_inters);
+rm_events:
+	device_remove_file(dev, &dev_attr_events);
+rm_driver:
+	device_remove_file(dev, &dev_attr_driver);
+rm_device:
+	device_remove_file(dev, &dev_attr_device);
+done:
+	return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * Removes every attribute created by dbg_create_files(), in reverse
+ * creation order. (The previous version omitted dev_attr_prime and
+ * dev_attr_dtds, leaking those sysfs files.)
+ *
+ * This function returns an error code
+ */
+static int __maybe_unused dbg_remove_files(struct device *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+	device_remove_file(dev, &dev_attr_dtds);
+	device_remove_file(dev, &dev_attr_prime);
+	device_remove_file(dev, &dev_attr_wakeup);
+	device_remove_file(dev, &dev_attr_requests);
+	device_remove_file(dev, &dev_attr_registers);
+	device_remove_file(dev, &dev_attr_qheads);
+	device_remove_file(dev, &dev_attr_port_test);
+	device_remove_file(dev, &dev_attr_inters);
+	device_remove_file(dev, &dev_attr_events);
+	device_remove_file(dev, &dev_attr_driver);
+	device_remove_file(dev, &dev_attr_device);
+	return 0;
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep: endpoint
+ *
+ * Sets the USB direction bit (0x80) for TX (IN) endpoints and ORs in the
+ * endpoint number.
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
+}
+
+/* ep_prime_timer_func - timer callback watching for a stuck prime bit.
+ * Re-armed by _hardware_enqueue() after every prime. If the endpoint's
+ * prime bit is still set with an ACTIVE TD at the queue head, the check
+ * is retried up to MAX_PRIME_CHECK_RETRY times before the failure is
+ * logged and counted. Runs with the endpoint lock held; bails out early
+ * when VBUS is gone or the bus is suspended.
+ * NOTE(review): mod_timer() is given EP_PRIME_CHECK_DELAY directly —
+ * confirm the macro expands to an absolute jiffies expiry (jiffies + n),
+ * since mod_timer() expects an absolute time.
+ */
+static void ep_prime_timer_func(unsigned long data)
+{
+	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
+	struct ci13xxx_req *req;
+	struct list_head *ptr = NULL;
+	int n = hw_ep_bit(mep->num, mep->dir);
+	unsigned long flags;
+
+
+	spin_lock_irqsave(mep->lock, flags);
+
+	if (_udc && (!_udc->vbus_active || _udc->suspended)) {
+		pr_debug("ep%d%s prime timer when vbus_active=%d,suspend=%d\n",
+			mep->num, mep->dir ? "IN" : "OUT",
+			_udc->vbus_active, _udc->suspended);
+		goto out;
+	}
+
+	if (!hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		goto out;
+
+	if (list_empty(&mep->qh.queue))
+		goto out;
+
+	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);
+
+	/* clean speculative fetches on req->ptr->token */
+	mb();
+	if (!(TD_STATUS_ACTIVE & req->ptr->token))
+		goto out;
+
+	mep->prime_timer_count++;
+	if (mep->prime_timer_count == MAX_PRIME_CHECK_RETRY) {
+		/* Give up: dump QH and queued TDs, record the failure */
+		mep->prime_timer_count = 0;
+		pr_info("ep%d dir:%s QH:cap:%08x cur:%08x next:%08x tkn:%08x\n",
+				mep->num, mep->dir ? "IN" : "OUT",
+				mep->qh.ptr->cap, mep->qh.ptr->curr,
+				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
+		list_for_each(ptr, &mep->qh.queue) {
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+			pr_info("\treq:%pKa:%08xtkn:%08xpage0:%08xsts:%d\n",
+					&req->dma, req->ptr->next,
+					req->ptr->token, req->ptr->page[0],
+					req->req.status);
+		}
+		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
+		mep->prime_fail_count++;
+	} else {
+		mod_timer(&mep->prime_timer, EP_PRIME_CHECK_DELAY);
+	}
+
+	spin_unlock_irqrestore(mep->lock, flags);
+	return;
+
+out:
+	mep->prime_timer_count = 0;
+	spin_unlock_irqrestore(mep->lock, flags);
+
+}
+
+/**
+ * _hardware_queue: configures a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to push to the controller
+ *
+ * Maps the request buffer for DMA (if needed), builds the TD (plus an
+ * extra zero-length TD for ZLP-terminated transfers), chains it behind
+ * any request already queued, handles MSM SPS-mode specifics, programs
+ * the queue head and primes the endpoint. Defers the request when the
+ * bus is suspended and remote wakeup is pending.
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	unsigned int i;
+	int ret = 0;
+	unsigned int length = mReq->req.length;
+	struct ci13xxx *udc = _udc;
+
+	trace("%pK, %pK", mEp, mReq);
+
+	/* don't queue twice */
+	if (mReq->req.status == -EALREADY)
+		return -EALREADY;
+
+	mReq->req.status = -EALREADY;
+	if (length && mReq->req.dma == DMA_ERROR_CODE) {
+		mReq->req.dma = dma_map_single(mEp->device, mReq->req.buf,
+					length, mEp->dir ? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+		/* NOTE(review): compares against 0 rather than using
+		 * dma_mapping_error() — confirm this is safe on all
+		 * supported platforms. */
+		if (mReq->req.dma == 0)
+			return -ENOMEM;
+
+		mReq->map = 1;
+	}
+
+	/* ZLP needed: allocate a zero-length terminating TD */
+	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+					   &mReq->zdma);
+		if (mReq->zptr == NULL) {
+			if (mReq->map) {
+				dma_unmap_single(mEp->device, mReq->req.dma,
+					length, mEp->dir ? DMA_TO_DEVICE :
+					DMA_FROM_DEVICE);
+				mReq->req.dma = DMA_ERROR_CODE;
+				mReq->map     = 0;
+			}
+			return -ENOMEM;
+		}
+		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+		mReq->zptr->next    = TD_TERMINATE;
+		mReq->zptr->token   = TD_STATUS_ACTIVE;
+		if (!mReq->req.no_interrupt)
+			mReq->zptr->token   |= TD_IOC;
+	}
+
+	/*
+	 * TD configuration
+	 * TODO - handle requests which spawns into several TDs
+	 */
+	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
+	mReq->ptr->token   &= TD_TOTAL_BYTES;
+	mReq->ptr->token   |= TD_STATUS_ACTIVE;
+	if (mReq->zptr) {
+		mReq->ptr->next    = mReq->zdma;
+	} else {
+		mReq->ptr->next    = TD_TERMINATE;
+		if (!mReq->req.no_interrupt)
+			mReq->ptr->token  |= TD_IOC;
+	}
+
+	/* MSM Specific: updating the request as required for
+	 * SPS mode. Enable MSM DMA engine according
+	 * to the UDC private data in the request.
+	 */
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+		if (mReq->req.udc_priv & MSM_SPS_MODE) {
+			mReq->ptr->token = TD_STATUS_ACTIVE;
+			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
+				mReq->ptr->next = TD_TERMINATE;
+			else
+				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
+			if (!mReq->req.no_interrupt)
+				mReq->ptr->token |= MSM_ETD_IOC;
+		}
+		mReq->req.dma = 0;
+	}
+
+	mReq->ptr->page[0]  = mReq->req.dma;
+	for (i = 1; i < 5; i++)
+		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
+							~TD_RESERVED_MASK;
+	/* Makes sure that above write goes through */
+	wmb();
+
+	/* Remote Wakeup: a suspended bus must be woken before the request
+	 * can make progress; reject if the host disabled remote wakeup */
+	if (udc->suspended) {
+		if (!udc->gadget.remote_wakeup) {
+			mReq->req.status = -EAGAIN;
+
+			dev_dbg(mEp->device, "%s: queue failed (suspend).",
+					__func__);
+			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
+					__func__, mEp->num);
+
+			return -EAGAIN;
+		}
+
+		usb_phy_set_suspend(udc->transceiver, 0);
+		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
+	/* Queue non-empty: chain behind the last TD and use the ATDTW
+	 * tripwire protocol to check whether hardware is still fetching */
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReqPrev;
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		int tmp_stat;
+		ktime_t start, diff;
+
+		mReqPrev = list_entry(mEp->qh.queue.prev,
+				struct ci13xxx_req, queue);
+		if (mReqPrev->zptr)
+			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
+		else
+			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
+		/* Makes sure that above write goes through */
+		wmb();
+		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+			goto done;
+		start = ktime_get();
+		do {
+			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+			diff = ktime_sub(ktime_get(), start);
+			/* poll for max. 100ms */
+			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
+				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
+					break;
+				printk_ratelimited(KERN_ERR
+				"%s:queue failed ep#%d %s\n",
+				__func__, mEp->num, mEp->dir ? "IN" : "OUT");
+				return -EAGAIN;
+			}
+		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
+		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
+		if (tmp_stat)
+			goto done;
+	}
+
+	/* Hardware may leave few TDs unprocessed, check and reprime with 1st */
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReq_active, *mReq_next;
+		u32 i = 0;
+
+		/* Nothing to be done if hardware already finished this TD */
+		if ((TD_STATUS_ACTIVE & mReq->ptr->token) == 0)
+			goto done;
+
+		/* Iterate forward to find first TD with ACTIVE bit set */
+		mReq_active = mReq;
+		list_for_each_entry(mReq_next, &mEp->qh.queue, queue) {
+			i++;
+			mEp->dTD_active_re_q_count++;
+			if (TD_STATUS_ACTIVE & mReq_next->ptr->token) {
+				mReq_active = mReq_next;
+				dbg_event(_usb_addr(mEp), "ReQUE",
+					  mReq_next->ptr->token);
+				pr_debug("!!ReQ(%u-%u-%x)-%u!!\n", mEp->num,
+					 mEp->dir, mReq_next->ptr->token, i);
+				break;
+			}
+		}
+
+		/* QH configuration */
+		mEp->qh.ptr->td.next = mReq_active->dma;
+		mEp->qh.ptr->td.token &= ~TD_STATUS;
+		goto prime;
+	}
+
+	/* QH configuration */
+	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+		if (mReq->req.udc_priv & MSM_SPS_MODE) {
+			mEp->qh.ptr->td.next   |= MSM_ETD_TYPE;
+			i = hw_cread(CAP_ENDPTPIPEID +
+						 mEp->num * sizeof(u32), ~0);
+			/* Read current value of this EPs pipe id */
+			i = (mEp->dir == TX) ?
+				((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) :
+					(i & MSM_PIPE_ID_MASK);
+			/*
+			 * If requested pipe id is different from current,
+			 * then write it
+			 */
+			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
+				if (mEp->dir == TX)
+					hw_cwrite(
+						CAP_ENDPTPIPEID +
+							mEp->num * sizeof(u32),
+						MSM_PIPE_ID_MASK <<
+							MSM_TX_PIPE_ID_OFS,
+						(mReq->req.udc_priv &
+						 MSM_PIPE_ID_MASK)
+							<< MSM_TX_PIPE_ID_OFS);
+				else
+					hw_cwrite(
+						CAP_ENDPTPIPEID +
+							mEp->num * sizeof(u32),
+						MSM_PIPE_ID_MASK,
+						mReq->req.udc_priv &
+							MSM_PIPE_ID_MASK);
+			}
+		}
+	}
+
+	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
+	mEp->qh.ptr->cap |=  QH_ZLT;
+
+prime:
+	/* Makes sure that above write goes through */
+	wmb();   /* synchronize before ep prime */
+
+	ret = hw_ep_prime(mEp->num, mEp->dir,
+			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+	if (!ret)
+		mod_timer(&mEp->prime_timer, EP_PRIME_CHECK_DELAY);
+
+done:
+	return ret;
+}
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request being retired
+ *
+ * Returns -EINVAL if the request was never queued, -EBUSY while the
+ * controller still owns the TD (or, in MSM SPS finite-transfer mode,
+ * unconditionally), otherwise unmaps the buffer, translates the TD status
+ * bits into req.status and computes req.actual.
+ *
+ * This function returns an error code or the number of bytes transferred
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	trace("%pK, %pK", mEp, mReq);
+
+	if (mReq->req.status != -EALREADY)
+		return -EINVAL;
+
+	/* clean speculative fetches on req->ptr->token */
+	mb();
+
+	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
+		return -EBUSY;
+
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
+		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
+			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
+			return -EBUSY;
+	if (mReq->zptr) {
+		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
+			return -EBUSY;
+
+		/* The controller may access this dTD one more time.
+		 * Defer freeing this to next zero length dTD completion.
+		 * It is safe to assume that controller will no longer
+		 * access the previous dTD after next dTD completion.
+		 */
+		if (mEp->last_zptr)
+			dma_pool_free(mEp->td_pool, mEp->last_zptr,
+					mEp->last_zdma);
+		mEp->last_zptr = mReq->zptr;
+		mEp->last_zdma = mReq->zdma;
+
+		mReq->zptr = NULL;
+	}
+
+	/* NOTE(review): redundant — overwritten unconditionally below when
+	 * the token status bits are decoded. */
+	mReq->req.status = 0;
+
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ERROR_CODE;
+		mReq->map     = 0;
+	}
+
+	/* Collapse any TD error bit into a generic -1 error status */
+	mReq->req.status = mReq->ptr->token & TD_STATUS;
+	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+
+	/* actual = requested - bytes the controller left untransferred */
+	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
+	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
+	mReq->req.actual   = mReq->req.length - mReq->req.actual;
+	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+
+	return mReq->req.actual;
+}
+
+/**
+ * purge_rw_queue: Purge requests pending at the remote-wakeup
+ * queue and send them to the HW.
+ * @udc: controller instance
+ *
+ * Go over all of the endpoints and push any pending requests to
+ * the HW queue. Requests the hardware rejects are completed with the
+ * error status; control-endpoint completions are reported against ep0in.
+ * Clears udc->rw_pending when done.
+ */
+static void purge_rw_queue(struct ci13xxx *udc)
+{
+	int i;
+	struct ci13xxx_ep  *mEp  = NULL;
+	struct ci13xxx_req *mReq = NULL;
+
+	/*
+	 * Go over all of the endpoints and push any pending requests to
+	 * the HW queue.
+	 */
+	for (i = 0; i < hw_ep_max; i++) {
+		mEp = &udc->ci13xxx_ep[i];
+
+		while (!list_empty(&udc->ci13xxx_ep[i].rw_queue)) {
+			int retval;
+
+			/* pop oldest request */
+			mReq = list_entry(udc->ci13xxx_ep[i].rw_queue.next,
+					  struct ci13xxx_req, queue);
+
+			list_del_init(&mReq->queue);
+
+			retval = _hardware_enqueue(mEp, mReq);
+
+			if (retval != 0) {
+				dbg_event(_usb_addr(mEp), "QUEUE", retval);
+				mReq->req.status = retval;
+				if (mReq->req.complete != NULL) {
+					if (mEp->type ==
+					    USB_ENDPOINT_XFER_CONTROL)
+						mReq->req.complete(
+							&(_udc->ep0in.ep),
+							&mReq->req);
+					else
+						mReq->req.complete(
+							&mEp->ep,
+							&mReq->req);
+				}
+				retval = 0;
+			}
+
+			/* NOTE(review): retval was reset to 0 above, so a
+			 * request that failed and was already completed is
+			 * still added to qh.queue here, and the multi_req
+			 * branch is unreachable — verify intent. */
+			if (!retval)
+				list_add_tail(&mReq->queue, &mEp->qh.queue);
+			else if (mEp->multi_req)
+				mEp->multi_req = false;
+
+		}
+	}
+
+	udc->rw_pending = false;
+}
+
+/**
+ * restore_original_req: Restore original req's attributes
+ * @mReq: Request
+ *
+ * This function restores original req's attributes. Call
+ * this function before completing the large req (>16K).
+ */
+static void restore_original_req(struct ci13xxx_req *mReq)
+{
+	/* Hand the caller's original buffer and length back to the request;
+	 * a successful transfer also reports the accumulated byte count. */
+	mReq->req.buf = mReq->multi.buf;
+	mReq->req.length = mReq->multi.len;
+	if (mReq->req.status == 0)
+		mReq->req.actual = mReq->multi.actual;
+
+	/* Reset the multi-TD bookkeeping for the next large transfer. */
+	mReq->multi.buf = NULL;
+	mReq->multi.len = 0;
+	mReq->multi.actual = 0;
+}
+
+/**
+ * release_ep_request: Free and endpoint request and release
+ * resources
+ * @mEp: endpoint
+ * @mReq: request
+ *
+ * Resets MSM SPS pipe-id state, unmaps DMA, frees any ZLP TD, restores a
+ * multi-TD request and completes it with -ESHUTDOWN. The endpoint lock is
+ * dropped around the completion callback (which may re-enter the driver)
+ * and re-taken afterwards; caller must hold mEp->lock.
+ */
+static void release_ep_request(struct ci13xxx_ep *mEp,
+			       struct ci13xxx_req *mReq)
+{
+	struct ci13xxx_ep *mEpTemp = mEp;
+
+	unsigned int val;
+
+	/* MSM Specific: Clear end point specific register */
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
+		if (mReq->req.udc_priv & MSM_SPS_MODE) {
+			val = hw_cread(CAP_ENDPTPIPEID +
+				mEp->num * sizeof(u32),
+				~0);
+
+			if (val != MSM_EP_PIPE_ID_RESET_VAL)
+				hw_cwrite(
+					CAP_ENDPTPIPEID +
+					 mEp->num * sizeof(u32),
+					~0, MSM_EP_PIPE_ID_RESET_VAL);
+		}
+	}
+	mReq->req.status = -ESHUTDOWN;
+
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma,
+			mReq->req.length,
+			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ERROR_CODE;
+		mReq->map     = 0;
+	}
+
+	if (mReq->zptr) {
+		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+		mReq->zptr = NULL;
+		mReq->zdma = 0;
+	}
+
+	if (mEp->multi_req) {
+		restore_original_req(mReq);
+		mEp->multi_req = false;
+	}
+
+	if (mReq->req.complete != NULL) {
+		spin_unlock(mEp->lock);
+		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+			mReq->req.length)
+			mEpTemp = &_udc->ep0in;
+		mReq->req.complete(&mEpTemp->ep, &mReq->req);
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mReq->req.complete = NULL;
+		spin_lock(mEp->lock);
+	}
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * Flushes the endpoint FIFO and retires every request on both the HW
+ * queue and the remote-wakeup queue (each completed with -ESHUTDOWN
+ * by release_ep_request()).
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	trace("%pK", mEp);
+
+	if (mEp == NULL)
+		return -EINVAL;
+
+	/* Stop the watchdog that re-primes the endpoint */
+	del_timer(&mEp->prime_timer);
+	mEp->prime_timer_count = 0;
+
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	/* Retire everything already handed to the HW queue */
+	while (!list_empty(&mEp->qh.queue)) {
+		/* pop oldest request */
+		struct ci13xxx_req *mReq =
+			list_entry(mEp->qh.queue.next,
+				   struct ci13xxx_req, queue);
+		list_del_init(&mReq->queue);
+
+		release_ep_request(mEp, mReq);
+	}
+
+	/* Clear the requests pending at the remote-wakeup queue */
+	while (!list_empty(&mEp->rw_queue)) {
+
+		/* pop oldest request */
+		struct ci13xxx_req *mReq =
+			list_entry(mEp->rw_queue.next,
+				   struct ci13xxx_req, queue);
+
+		list_del_init(&mReq->queue);
+
+		release_ep_request(mEp, mReq);
+	}
+
+	/* Free the cached zero-length-packet TD from the last transfer */
+	if (mEp->last_zptr) {
+		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
+		mEp->last_zptr = NULL;
+		mEp->last_zdma = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * Marks the link down, notifies the gadget driver of the disconnect,
+ * then nukes both ep0 queues.
+ *
+ * This function returns an error code
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+{
+	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	trace("%pK", gadget);
+
+	if (gadget == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->gadget.remote_wakeup = 0;
+	udc->suspended = 0;
+	udc->configured = 0;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* Callback runs unlocked; assumes a gadget driver is bound --
+	 * TODO confirm udc->driver cannot be NULL on this path */
+	udc->driver->disconnect(gadget);
+
+	spin_lock_irqsave(udc->lock, flags);
+	_ep_nuke(&udc->ep0out);
+	_ep_nuke(&udc->ep0in);
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets USB engine after a bus reset occurred.
+ * Called with udc->lock held; the lock is dropped while notifying the
+ * gadget driver and transceiver, and re-acquired before returning on
+ * every path (including the error paths).
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	int retval;
+
+	trace("%pK", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	dbg_event(0xFF, "BUS RST", 0);
+
+	spin_unlock(udc->lock);
+
+	/* A reset while suspended implies an implicit resume first */
+	if (udc->suspended) {
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			usb_phy_set_suspend(udc->transceiver, 0);
+		udc->driver->resume(&udc->gadget);
+		udc->suspended = 0;
+	}
+
+	/*stop charging upon reset */
+	if (udc->transceiver)
+		usb_phy_set_power(udc->transceiver, 100);
+
+	retval = _gadget_stop_activity(&udc->gadget);
+	if (retval)
+		goto done;
+
+	/* Flush requests parked while we were suspended */
+	if (udc->rw_pending)
+		purge_rw_queue(udc);
+
+	_udc->skip_flush = false;
+	retval = hw_usb_reset();
+
+done:
+	/*
+	 * Re-acquire the lock on every exit: the interrupt handler holds
+	 * udc->lock across this call, so the error paths must not return
+	 * with it released (fixes the original early-exit paths that
+	 * skipped the spin_lock()).
+	 */
+	spin_lock(udc->lock);
+	if (retval)
+		err("error: %i", retval);
+}
+
+/**
+ * isr_resume_handler: USB PCI (port change) interrupt handler
+ * @udc: UDC device
+ *
+ * Latches the negotiated port speed and, if the bus was suspended,
+ * performs the resume sequence (notify, un-suspend PHY, call the
+ * gadget driver's resume) and drains the remote-wakeup queue.
+ * Called with udc->lock held; dropped around the callbacks.
+ */
+static void isr_resume_handler(struct ci13xxx *udc)
+{
+	udc->gadget.speed = hw_port_is_high_speed() ?
+		USB_SPEED_HIGH : USB_SPEED_FULL;
+	if (udc->suspended) {
+		spin_unlock(udc->lock);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			  CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			usb_phy_set_suspend(udc->transceiver, 0);
+		udc->suspended = 0;
+		udc->driver->resume(&udc->gadget);
+		spin_lock(udc->lock);
+
+		/* Now that the bus is awake, push any parked requests */
+		if (udc->rw_pending)
+			purge_rw_queue(udc);
+
+	}
+}
+
+/**
+ * isr_suspend_handler: USB SLI (suspend) interrupt handler
+ * @udc: UDC device
+ *
+ * Runs the suspend sequence (gadget driver suspend callback, event
+ * notification, PHY suspend) when the link goes idle with VBUS still
+ * present. Called with udc->lock held; dropped around the callbacks.
+ */
+static void isr_suspend_handler(struct ci13xxx *udc)
+{
+	/* Ignore spurious suspends before enumeration or without VBUS */
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+		udc->vbus_active) {
+		if (udc->suspended == 0) {
+			spin_unlock(udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_SUSPEND_EVENT);
+			if (udc->transceiver)
+				usb_phy_set_suspend(udc->transceiver, 1);
+			spin_lock(udc->lock);
+			udc->suspended = 1;
+		}
+	}
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep: endpoint
+ * @req: request handled
+ *
+ * Only logs the outcome of the internally-queued GET_STATUS reply.
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	trace("%pK, %pK", ep, req);
+
+	/* Nothing to report against if either handle is missing */
+	if (!ep || !req) {
+		err("EINVAL");
+		return;
+	}
+
+	if (req->status != 0)
+		err("GET_STATUS failed");
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @udc: udc struct
+ * @setup: setup request packet
+ *
+ * Builds the two-byte GET_STATUS reply in udc->status_buf and queues
+ * it on ep0-IN. Caller holds mEp->lock; dropped around usb_ep_queue().
+ *
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx *udc,
+				   struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_ep *mEp = &udc->ep0in;
+	struct usb_request *req = udc->status;
+	int dir, num, retval;
+
+	trace("%pK, %pK", mEp, setup);
+
+	if (mEp == NULL || setup == NULL)
+		return -EINVAL;
+
+	req->complete = isr_get_status_complete;
+	req->length = 2;
+	req->buf = udc->status_buf;
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Assume that device is bus powered for now. */
+		*((u16 *)req->buf) = _udc->gadget.remote_wakeup << 1;
+		retval = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK) ==
+		   USB_RECIP_ENDPOINT) {
+		/* Report the halt state of the addressed endpoint */
+		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+			TX : RX;
+		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
+	}
+	/* else do nothing; reserved for future use */
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
+	spin_lock(mEp->lock);
+	return retval;
+}
+
+/**
+ * isr_setup_status_complete: setup_status request complete function
+ * @ep: endpoint
+ * @req: request handled
+ *
+ * Caller must release lock. Put the port in test mode if test mode
+ * feature is selected.
+ */
+static void
+isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx *udc = req->context;
+	unsigned long lock_flags;
+
+	trace("%pK, %pK", ep, req);
+
+	/*
+	 * A deferred SET_FEATURE(TEST_MODE) takes effect only after the
+	 * status stage completes, which is right here.
+	 */
+	spin_lock_irqsave(udc->lock, lock_flags);
+	if (udc->test_mode)
+		hw_port_test_set(udc->test_mode);
+	spin_unlock_irqrestore(udc->lock, lock_flags);
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transation
+ * @udc: udc struct
+ *
+ * Queues the zero-length status packet on the endpoint opposite to the
+ * data stage direction. Caller holds mEp->lock; dropped around
+ * usb_ep_queue().
+ *
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx *udc)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	int retval;
+	struct ci13xxx_ep *mEp;
+
+	trace("%pK", udc);
+
+	/* Status stage runs opposite to the data stage direction */
+	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
+	udc->status->context = udc;
+	udc->status->complete = isr_setup_status_complete;
+	udc->status->length = 0;
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
+	spin_lock(mEp->lock);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * Retires completed requests on @mEp: dequeues each TD from the HW,
+ * re-chunks in-progress large (>16K) requests, and invokes completion
+ * callbacks with the lock dropped.
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_req *mReq, *mReqTemp;
+	struct ci13xxx_ep *mEpTemp = mEp;
+	int retval = 0;
+	int req_dequeue = 1;
+	struct ci13xxx *udc = _udc;
+
+	trace("%pK", mEp);
+
+	if (list_empty(&mEp->qh.queue))
+		return 0;
+
+	del_timer(&mEp->prime_timer);
+	mEp->prime_timer_count = 0;
+	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+			queue) {
+dequeue:
+		retval = _hardware_dequeue(mEp, mReq);
+		if (retval < 0) {
+			/*
+			 * FIXME: don't know exact delay
+			 * required for HW to update dTD status
+			 * bits. This is a temporary workaround till
+			 * HW designers come back on this.
+			 */
+			if (retval == -EBUSY && req_dequeue &&
+				(mEp->dir == 0 || mEp->num == 0)) {
+				/* Retry once per ISR after a short delay */
+				req_dequeue = 0;
+				udc->dTD_update_fail_count++;
+				mEp->dTD_update_fail_count++;
+				udelay(10);
+				goto dequeue;
+			}
+			break;
+		}
+		req_dequeue = 0;
+
+		if (mEp->multi_req) { /* Large request in progress */
+			unsigned int remain_len;
+
+			mReq->multi.actual += mReq->req.actual;
+			remain_len = mReq->multi.len - mReq->multi.actual;
+			/* Finished, errored, or short packet: complete it */
+			if (mReq->req.status || !remain_len ||
+				(mReq->req.actual != mReq->req.length)) {
+				restore_original_req(mReq);
+				mEp->multi_req = false;
+			} else {
+				/* Re-queue the next <=16K chunk of the req */
+				mReq->req.buf = mReq->multi.buf +
+						mReq->multi.actual;
+				mReq->req.length = min_t(unsigned int,
+							remain_len,
+							4 * CI13XXX_PAGE_SIZE);
+
+				mReq->req.status = -EINPROGRESS;
+				mReq->req.actual = 0;
+				list_del_init(&mReq->queue);
+				retval = _hardware_enqueue(mEp, mReq);
+				if (retval) {
+					err("Large req failed in middle");
+					mReq->req.status = retval;
+					restore_original_req(mReq);
+					mEp->multi_req = false;
+					goto done;
+				} else {
+					list_add_tail(&mReq->queue,
+						&mEp->qh.queue);
+					return 0;
+				}
+			}
+		}
+		list_del_init(&mReq->queue);
+done:
+
+		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+
+		if (mReq->req.complete != NULL) {
+			/* Completion callback runs with the lock dropped */
+			spin_unlock(mEp->lock);
+			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+					mReq->req.length)
+				mEpTemp = &_udc->ep0in;
+			mReq->req.complete(&mEpTemp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+
+	/* -EBUSY just means the HW hasn't finished the next TD yet */
+	if (retval == -EBUSY)
+		retval = 0;
+	if (retval < 0)
+		dbg_event(_usb_addr(mEp), "DONE", retval);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events: it retires completed
+ * transfers on every configured endpoint and then decodes any pending
+ * SETUP packet on ep0, handling the standard requests it can and
+ * delegating the rest to the gadget driver. Caller holds udc->lock;
+ * the lock is dropped around all callbacks into upper layers.
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	unsigned int i;
+	u8 tmode = 0;
+
+	trace("%pK", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+		int type, num, dir, err = -EINVAL;
+		struct usb_ctrlrequest req;
+
+		if (mEp->desc == NULL)
+			continue;   /* not configured */
+
+		/* First retire any completed transfers on this endpoint */
+		if (hw_test_and_clear_complete(i)) {
+			err = isr_tr_complete_low(mEp);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+				if (err > 0)   /* needs status phase */
+					err = isr_setup_status_phase(udc);
+				if (err < 0) {
+					dbg_event(_usb_addr(mEp),
+						  "ERROR", err);
+					spin_unlock(udc->lock);
+					if (usb_ep_set_halt(&mEp->ep))
+						err("error: ep_set_halt");
+					spin_lock(udc->lock);
+				}
+			}
+		}
+
+		/* The rest of the loop body handles SETUP packets only */
+		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+		    !hw_test_and_clear_setup_status(i))
+			continue;
+
+		if (i != 0) {
+			warn("ctrl traffic received at endpoint");
+			continue;
+		}
+
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(&udc->ep0out);
+		_ep_nuke(&udc->ep0in);
+
+		/* read_setup_packet */
+		do {
+			hw_test_and_set_setup_guard();
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+			/* Ensure buffer is read before acknowledging to h/w */
+			mb();
+		} while (!hw_test_and_clear_setup_guard());
+
+		type = req.bRequestType;
+
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+		dbg_setup(_usb_addr(mEp), &req);
+
+		/* Handle standard requests locally, delegate the rest */
+		switch (req.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += hw_ep_max/2;
+				/* Honor wedge: ignore CLEAR_HALT if wedged */
+				if (!udc->ci13xxx_ep[num].wedge) {
+					spin_unlock(udc->lock);
+					err = usb_ep_clear_halt(
+						&udc->ci13xxx_ep[num].ep);
+					spin_lock(udc->lock);
+					if (err)
+						break;
+				}
+				err = isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+					le16_to_cpu(req.wValue) ==
+					USB_DEVICE_REMOTE_WAKEUP) {
+				if (req.wLength != 0)
+					break;
+				udc->gadget.remote_wakeup = 0;
+				err = isr_setup_status_phase(udc);
+			} else {
+				goto delegate;
+			}
+			break;
+		case USB_REQ_GET_STATUS:
+			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
+			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 2 ||
+			    le16_to_cpu(req.wValue)  != 0)
+				break;
+			err = isr_get_status_response(udc, &req);
+			break;
+		case USB_REQ_SET_ADDRESS:
+			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 0 ||
+			    le16_to_cpu(req.wIndex)  != 0)
+				break;
+			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
+			if (err)
+				break;
+			err = isr_setup_status_phase(udc);
+			break;
+		case USB_REQ_SET_CONFIGURATION:
+			/* Track configured state, then let the driver act */
+			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
+				udc->configured = !!req.wValue;
+			goto delegate;
+		case USB_REQ_SET_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += hw_ep_max/2;
+
+				spin_unlock(udc->lock);
+				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+				spin_lock(udc->lock);
+				if (!err)
+					isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+				if (req.wLength != 0)
+					break;
+				switch (le16_to_cpu(req.wValue)) {
+				case USB_DEVICE_REMOTE_WAKEUP:
+					udc->gadget.remote_wakeup = 1;
+					err = isr_setup_status_phase(udc);
+					break;
+				case USB_DEVICE_TEST_MODE:
+					/* Mode latched; applied after the
+					 * status stage completes */
+					tmode = le16_to_cpu(req.wIndex) >> 8;
+					switch (tmode) {
+					case TEST_J:
+					case TEST_K:
+					case TEST_SE0_NAK:
+					case TEST_PACKET:
+					case TEST_FORCE_EN:
+						udc->test_mode = tmode;
+						err = isr_setup_status_phase(
+								udc);
+						break;
+					default:
+						break;
+					}
+				/* NOTE(review): no break here -- TEST_MODE
+				 * falls through to delegate; confirm this is
+				 * intentional */
+				default:
+					goto delegate;
+				}
+			} else {
+				goto delegate;
+			}
+			break;
+		default:
+delegate:
+			if (req.wLength == 0)   /* no data phase */
+				udc->ep0_dir = TX;
+
+			spin_unlock(udc->lock);
+			err = udc->driver->setup(&udc->gadget, &req);
+			spin_lock(udc->lock);
+			break;
+		}
+
+		/* Protocol stall on any failure in the request handling */
+		if (err < 0) {
+			dbg_event(_usb_addr(mEp), "ERROR", err);
+
+			spin_unlock(udc->lock);
+			if (usb_ep_set_halt(&mEp->ep))
+				err("error: ep_set_halt");
+			spin_lock(udc->lock);
+		}
+	}
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Programs the queue head (IOS for control, MULT for isochronous, ZLT
+ * otherwise, plus max packet size) and enables the endpoint in HW.
+ *
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+		     const struct usb_endpoint_descriptor *desc)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int retval = 0;
+	unsigned long flags;
+	unsigned int mult = 0;
+
+	trace("ep = %pK, desc = %pK", ep, desc);
+
+	if (ep == NULL || desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should enable ctrl endpts */
+
+	mEp->desc = desc;
+
+	if (!list_empty(&mEp->qh.queue))
+		warn("enabling a non-empty endpoint!");
+
+	/* Cache direction, number and transfer type from the descriptor */
+	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+	mEp->num  = usb_endpoint_num(desc);
+	mEp->type = usb_endpoint_type(desc);
+
+	mEp->ep.maxpacket = usb_endpoint_maxp(desc);
+
+	dbg_event(_usb_addr(mEp), "ENABLE", 0);
+
+	mEp->qh.ptr->cap = 0;
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+		/* Interrupt On Setup for control endpoints */
+		mEp->qh.ptr->cap |=  QH_IOS;
+	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
+		/* High-bandwidth mult is encoded in the maxpacket field */
+		mEp->qh.ptr->cap &= ~QH_MULT;
+		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
+		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
+	} else {
+		/* Zero Length Termination for bulk/interrupt */
+		mEp->qh.ptr->cap |= QH_ZLT;
+	}
+
+	mEp->qh.ptr->cap |=
+		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
+
+	/* complete all the updates to ept->head before enabling endpoint*/
+	mb();
+
+	/*
+	 * Enable endpoints in the HW other than ep0 as ep0
+	 * is always enabled
+	 */
+	if (mEp->num)
+		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Nukes and disables the endpoint; for control endpoints both
+ * directions are processed (the do/while toggles mEp->dir).
+ *
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%pK", ep);
+
+	if (ep == NULL)
+		return -EINVAL;
+	else if (mEp->desc == NULL)
+		return -EBUSY;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should disable ctrl endpts */
+
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "DISABLE", 0);
+
+		retval |= _ep_nuke(mEp);
+		retval |= hw_ep_disable(mEp->num, mEp->dir);
+
+		/* Control endpoints get both directions disabled */
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	/* Mark the endpoint unconfigured */
+	mEp->desc = NULL;
+	mEp->ep.desc = NULL;
+	mEp->ep.maxpacket = USHRT_MAX;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Allocates the request wrapper plus its transfer descriptor from the
+ * endpoint's DMA pool; returns NULL if either allocation fails.
+ *
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq;
+
+	trace("%pK, %i", ep, gfp_flags);
+
+	if (ep == NULL) {
+		err("EINVAL");
+		return NULL;
+	}
+
+	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+	if (mReq) {
+		INIT_LIST_HEAD(&mReq->queue);
+		mReq->req.dma = DMA_ERROR_CODE;
+
+		/* The TD lives in the endpoint's coherent DMA pool */
+		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+					   &mReq->dma);
+		if (!mReq->ptr) {
+			kfree(mReq);
+			mReq = NULL;
+		}
+	}
+
+	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
+
+	return mReq ? &mReq->req : NULL;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Returns the TD to the endpoint's DMA pool and frees the wrapper.
+ * Refuses to free a request that is still queued (-EBUSY case).
+ *
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	trace("%pK, %pK", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	} else if (!list_empty(&mReq->queue)) {
+		err("EBUSY");
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mReq->ptr)
+		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+	kfree(mReq);
+
+	dbg_event(_usb_addr(mEp), "FREE", 0);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Validates the request, handles large (>16K) bulk-OUT requests by
+ * chunking, parks requests while the bus is suspended (triggering
+ * remote wakeup if allowed), and otherwise hands the request to HW.
+ *
+ * Check usb_ep_queue()* at usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+		    gfp_t __maybe_unused gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	int retval = 0;
+	unsigned long flags;
+	struct ci13xxx *udc = _udc;
+
+	trace("%pK, %pK, %X", ep, req, gfp_flags);
+
+	if (ep == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+	if (req == NULL || mEp->desc == NULL) {
+		retval = -EINVAL;
+		goto done;
+	}
+
+	if (!udc->softconnect) {
+		retval = -ENODEV;
+		goto done;
+	}
+
+	/* Non-control traffic is only valid once configured */
+	if (!udc->configured && mEp->type !=
+		USB_ENDPOINT_XFER_CONTROL) {
+		trace("usb is not configured ept #%d, ept name#%s\n",
+			mEp->num, mEp->ep.name);
+		retval = -ESHUTDOWN;
+		goto done;
+	}
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+		/* Route ep0 data stages to the direction-specific ep */
+		if (req->length)
+			mEp = (_udc->ep0_dir == RX) ?
+				&_udc->ep0out : &_udc->ep0in;
+		if (!list_empty(&mEp->qh.queue)) {
+			_ep_nuke(mEp);
+			retval = -EOVERFLOW;
+			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
+		}
+	}
+
+	if (ep->endless && udc->gadget.speed == USB_SPEED_FULL) {
+		err("Queueing endless req is not supported for FS");
+		retval = -EINVAL;
+		goto done;
+	}
+
+	/* first nuke then test link, e.g. previous status has not sent */
+	if (!list_empty(&mReq->queue)) {
+		retval = -EBUSY;
+		err("request already in queue");
+		goto done;
+	}
+	if (mEp->multi_req) {
+		retval = -EAGAIN;
+		err("Large request is in progress. come again");
+		goto done;
+	}
+
+	/* Large (>16K) requests: only one at a time, bulk-OUT only;
+	 * serviced in 16K chunks (see isr_tr_complete_low()) */
+	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
+		if (!list_empty(&mEp->qh.queue)) {
+			retval = -EAGAIN;
+			err("Queue is busy. Large req is not allowed");
+			goto done;
+		}
+		if ((mEp->type != USB_ENDPOINT_XFER_BULK) ||
+				(mEp->dir != RX)) {
+			retval = -EINVAL;
+			err("Larger req is supported only for Bulk OUT");
+			goto done;
+		}
+		mEp->multi_req = true;
+		mReq->multi.len = req->length;
+		mReq->multi.buf = req->buf;
+		req->length = (4 * CI13XXX_PAGE_SIZE);
+	}
+
+	dbg_queue(_usb_addr(mEp), req, retval);
+
+	/* push request */
+	mReq->req.status = -EINPROGRESS;
+	mReq->req.actual = 0;
+
+	/* A remote wakeup is already in flight: park behind it */
+	if (udc->rw_pending) {
+		list_add_tail(&mReq->queue, &mEp->rw_queue);
+		retval = 0;
+		goto done;
+	}
+
+	if (udc->suspended) {
+		/* Remote Wakeup */
+		if (!udc->gadget.remote_wakeup) {
+
+			dev_dbg(mEp->device, "%s: queue failed (suspend).",
+					__func__);
+			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
+					__func__, mEp->num);
+			mEp->multi_req = false;
+
+			retval = -EAGAIN;
+			goto done;
+		}
+
+		/* Park the request and schedule a remote wakeup */
+		list_add_tail(&mReq->queue, &mEp->rw_queue);
+
+		udc->rw_pending = true;
+		schedule_delayed_work(&udc->rw_work,
+				      REMOTE_WAKEUP_DELAY);
+
+		retval = 0;
+		goto done;
+	}
+
+	retval = _hardware_enqueue(mEp, mReq);
+
+	/* -EALREADY: HW already owns it; treated as success */
+	if (retval == -EALREADY) {
+		dbg_event(_usb_addr(mEp), "QUEUE", retval);
+		retval = 0;
+	}
+	if (!retval)
+		list_add_tail(&mReq->queue, &mEp->qh.queue);
+	else if (mEp->multi_req)
+		mEp->multi_req = false;
+
+ done:
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Flushes the endpoint, unmaps/frees the request's DMA resources and
+ * completes it with -ECONNRESET. Refused while the controller is in
+ * low power mode (registers inaccessible).
+ *
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_ep *mEpTemp = mEp;
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+
+	trace("%pK, %pK", ep, req);
+
+	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
+		dev_err(udc->transceiver->dev,
+				"%s: Unable to dequeue while in LPM\n",
+				__func__);
+		return -EAGAIN;
+	}
+
+	if (ep == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+	/*
+	 * Only ep0 IN is exposed to composite.  When a req is dequeued
+	 * on ep0, check both ep0 IN and ep0 OUT queues.
+	 */
+	if (req == NULL || mReq->req.status != -EALREADY ||
+		mEp->desc == NULL || list_empty(&mReq->queue) ||
+		(list_empty(&mEp->qh.queue) && ((mEp->type !=
+			USB_ENDPOINT_XFER_CONTROL) ||
+			list_empty(&_udc->ep0out.qh.queue)))) {
+		spin_unlock_irqrestore(mEp->lock, flags);
+		return -EINVAL;
+	}
+
+	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
+
+	/* Control endpoints are flushed in both directions */
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+		hw_ep_flush(_udc->ep0out.num, RX);
+		hw_ep_flush(_udc->ep0in.num, TX);
+	} else {
+		hw_ep_flush(mEp->num, mEp->dir);
+	}
+
+	/* pop request */
+	list_del_init(&mReq->queue);
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = DMA_ERROR_CODE;
+		mReq->map     = 0;
+	}
+	req->status = -ECONNRESET;
+
+	/* Free cached and per-request zero-length-packet TDs */
+	if (mEp->last_zptr) {
+		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
+		mEp->last_zptr = NULL;
+		mEp->last_zdma = 0;
+	}
+
+	if (mReq->zptr) {
+		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+		mReq->zptr = NULL;
+		mReq->zdma = 0;
+	}
+
+	/* Large (>16K) request: hand back the caller's buffer/length */
+	if (mEp->multi_req) {
+		restore_original_req(mReq);
+		mEp->multi_req = false;
+	}
+
+	if (mReq->req.complete != NULL) {
+		/* Completion callback runs with the lock dropped */
+		spin_unlock(mEp->lock);
+		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+				mReq->req.length)
+			mEpTemp = &_udc->ep0in;
+		mReq->req.complete(&mEpTemp->ep, &mReq->req);
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mReq->req.complete = NULL;
+		spin_lock(mEp->lock);
+	}
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return 0;
+}
+
+/* is_sps_req: true only for MSM vendor requests flagged for SPS mode */
+static int is_sps_req(struct ci13xxx_req *mReq)
+{
+	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) != MSM_VENDOR_ID)
+		return 0;
+
+	return !!(mReq->req.udc_priv & MSM_SPS_MODE);
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Halts (or clears the halt of) the endpoint in HW; for control
+ * endpoints both directions are processed. Refused while suspended.
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx *udc = _udc;
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%pK, %i", ep, value);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	if (udc->suspended) {
+		dev_err(udc->transceiver->dev,
+			"%s: Unable to halt EP while suspended\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+		!list_empty(&mEp->qh.queue) &&
+		!is_sps_req(list_entry(mEp->qh.queue.next, struct ci13xxx_req,
+							   queue))){
+		spin_unlock_irqrestore(mEp->lock, flags);
+		return -EAGAIN;
+	}
+#endif
+
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "HALT", value);
+		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
+
+		/* Clearing a halt also clears the wedge state */
+		if (!value)
+			mEp->wedge = 0;
+
+		/* Control endpoints get both directions (un)halted */
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Marks the endpoint wedged (so host CLEAR_HALT is ignored) and then
+ * halts it via usb_ep_set_halt().
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long lock_flags;
+
+	trace("%pK", ep);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, lock_flags);
+	dbg_event(_usb_addr(mEp), "WEDGE", 0);
+	mEp->wedge = 1;
+	spin_unlock_irqrestore(mEp->lock, lock_flags);
+
+	/* Actually stall the endpoint */
+	return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Retires all pending requests via _ep_nuke(). Refused while the
+ * controller is in low power mode (registers inaccessible).
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+	struct ci13xxx *udc = _udc;
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	trace("%pK", ep);
+
+	if (ep == NULL) {
+		err("%02X: -EINVAL", _usb_addr(mEp));
+		return;
+	}
+
+	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
+		dev_err(udc->transceiver->dev,
+				"%s: Unable to fifo_flush while in LPM\n",
+				__func__);
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
+	/*
+	 * _ep_nuke() takes care of flushing the endpoint.
+	 * some function drivers expect udc to retire all
+	 * pending requests upon flushing an endpoint. There
+	 * is no harm in doing it.
+	 */
+	_ep_nuke(mEp);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware.
+ * Registered with the gadget core for every endpoint of this UDC.
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+	.enable	       = ep_enable,
+	.disable       = ep_disable,
+	.alloc_request = ep_alloc_request,
+	.free_request  = ep_free_request,
+	.queue	       = ep_queue,
+	.dequeue       = ep_dequeue,
+	.set_halt      = ep_set_halt,
+	.set_wedge     = ep_set_wedge,
+	.fifo_flush    = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+/*
+ * ci13xxx_vbus_session: VBUS state change from the transceiver.
+ * On VBUS on: reset the controller and (if soft-connected) enable the
+ * BAM and start it. On VBUS off: stop the controller and retire all
+ * activity. Only valid when the CI13XXX_PULLUP_ON_VBUS quirk is set.
+ */
+static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int gadget_ready = 0;
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->vbus_active = is_active;
+	if (udc->driver)
+		gadget_ready = 1;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* Defer everything else until a gadget driver is bound */
+	if (!gadget_ready)
+		return 0;
+
+	if (is_active) {
+		hw_device_reset(udc);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_CONNECT_EVENT);
+		/* Enable BAM (if needed) before starting controller */
+		if (udc->softconnect) {
+			dbg_event(0xFF, "BAM EN2",
+				_gadget->bam2bam_func_enabled);
+			msm_usb_bam_enable(CI_CTRL,
+				_gadget->bam2bam_func_enabled);
+			hw_device_state(udc->ep0out.qh.dma);
+		}
+	} else {
+		hw_device_state(0);
+		_gadget_stop_activity(&udc->gadget);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_DISCONNECT_EVENT);
+	}
+
+	return 0;
+}
+
+#define VBUS_DRAW_BUF_LEN 10
+#define MAX_OVERRIDE_VBUS_ALLOWED 900	/* 900 mA */
+/* Module parameter: optional override (in mA, capped at 900) for the
+ * configured VBUS current draw; consumed by ci13xxx_vbus_draw(). */
+static char vbus_draw_mA[VBUS_DRAW_BUF_LEN];
+module_param_string(vbus_draw_mA, vbus_draw_mA, VBUS_DRAW_BUF_LEN, 0644);
+
+/*
+ * ci13xxx_vbus_draw: report the configured VBUS current draw to the PHY.
+ * @_gadget: gadget
+ * @mA: current in mA negotiated for the selected configuration
+ *
+ * If the vbus_draw_mA module parameter is set and @mA equals the
+ * default CONFIG_USB_GADGET_VBUS_DRAW, the parameter value (capped at
+ * MAX_OVERRIDE_VBUS_ALLOWED) overrides @mA. Returns the PHY's result,
+ * or -ENOTSUPP without a transceiver.
+ */
+static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned int mA)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned int override_mA = 0;
+
+	/* override param to draw more current if battery draining faster */
+	if ((mA == CONFIG_USB_GADGET_VBUS_DRAW) &&
+		(vbus_draw_mA[0] != '\0')) {
+		/*
+		 * override_mA is unsigned, so parse with kstrtouint();
+		 * kstrtoint() takes an int * and was an incompatible
+		 * pointer type here.
+		 */
+		if ((!kstrtouint(vbus_draw_mA, 10, &override_mA)) &&
+				(override_mA <= MAX_OVERRIDE_VBUS_ALLOWED)) {
+			mA = override_mA;
+		}
+	}
+
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -ENOTSUPP;
+}
+
+/*
+ * ci13xxx_pullup: soft-connect/disconnect the D+ pullup.
+ * Records softconnect state; if VBUS is present and a driver is
+ * bound, enables the BAM (on connect) and starts/stops the
+ * controller. Uses pm_runtime to keep the HW awake while touching
+ * registers.
+ */
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->softconnect = is_active;
+	/* Nothing else to do without VBUS (quirk) or a bound driver */
+	if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
+			!udc->vbus_active) || !udc->driver) {
+		spin_unlock_irqrestore(udc->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	pm_runtime_get_sync(&_gadget->dev);
+
+	/* Enable BAM (if needed) before starting controller */
+	if (is_active) {
+		dbg_event(0xFF, "BAM EN1", _gadget->bam2bam_func_enabled);
+		msm_usb_bam_enable(CI_CTRL, _gadget->bam2bam_func_enabled);
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	/* VBUS may have dropped while we were unlocked: re-check */
+	if (!udc->vbus_active) {
+		spin_unlock_irqrestore(udc->lock, flags);
+		pm_runtime_put_sync(&_gadget->dev);
+		return 0;
+	}
+	if (is_active) {
+		spin_unlock(udc->lock);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_CONNECT_EVENT);
+		spin_lock(udc->lock);
+		hw_device_state(udc->ep0out.qh.dma);
+	} else {
+		hw_device_state(0);
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	pm_runtime_mark_last_busy(&_gadget->dev);
+	pm_runtime_put_autosuspend(&_gadget->dev);
+
+	return 0;
+}
+
+static int ci13xxx_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+static int ci13xxx_stop(struct usb_gadget *gadget);
+
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o).
+ * Registered with the gadget core for this UDC.
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_gadget_ops usb_gadget_ops = {
+	.vbus_session	= ci13xxx_vbus_session,
+	.wakeup		= ci13xxx_wakeup,
+	.vbus_draw	= ci13xxx_vbus_draw,
+	.pullup		= ci13xxx_pullup,
+	.udc_start	= ci13xxx_start,
+	.udc_stop	= ci13xxx_stop,
+};
+
+/**
+ * ci13xxx_start: register a gadget driver
+ * @gadget: our gadget
+ * @driver: the driver being registered
+ *
+ * Interrupts are enabled here.
+ *
+ * Return: 0 on success; -EINVAL for a malformed driver, -ENODEV when the
+ * UDC has not been probed, -EBUSY when a driver is already bound, or a
+ * negative error from endpoint/request setup.
+ */
+static int ci13xxx_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+	int retval = -ENOMEM;
+
+	trace("%pK", driver);
+
+	if (driver == NULL ||
+			driver->setup == NULL ||
+			driver->disconnect == NULL)
+		return -EINVAL;
+	else if (udc == NULL)
+		return -ENODEV;
+	else if (udc->driver != NULL)
+		return -EBUSY;
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	info("hw_ep_max = %d", hw_ep_max);
+
+	udc->gadget.dev.driver = NULL;
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	pm_runtime_get_sync(&udc->gadget.dev);
+
+	/* ep0 is driven by the UDC itself: enable both directions here */
+	udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
+	retval = usb_ep_enable(&udc->ep0out.ep);
+	if (retval)
+		goto pm_put;
+
+	/* NOTE(review): ep0out stays enabled if this fails — confirm OK */
+	udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
+	retval = usb_ep_enable(&udc->ep0in.ep);
+	if (retval)
+		goto pm_put;
+	udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
+	if (!udc->status) {
+		retval = -ENOMEM;
+		goto pm_put;
+	}
+
+	udc->status_buf = kzalloc(2 + udc->gadget.extra_buf_alloc,
+				GFP_KERNEL); /* for GET_STATUS */
+	if (!udc->status_buf) {
+		usb_ep_free_request(&udc->ep0in.ep, udc->status);
+		retval = -ENOMEM;
+		goto pm_put;
+	}
+	spin_lock_irqsave(udc->lock, flags);
+
+	udc->gadget.ep0 = &udc->ep0in.ep;
+	/* bind gadget */
+	driver->driver.bus = NULL;
+	udc->gadget.dev.driver = &driver->driver;
+
+	udc->driver = driver;
+	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
+		if (udc->vbus_active) {
+			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
+				hw_device_reset(udc);
+		} else {
+			/* no VBUS yet: defer start until vbus_session() */
+			goto done;
+		}
+	}
+
+	if (!udc->softconnect)
+		goto done;
+
+	retval = hw_device_state(udc->ep0out.qh.dma);
+
+done:
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	/* notify even when the start was deferred above */
+	if (udc->udc_driver->notify_event)
+		udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_UDC_STARTED_EVENT);
+pm_put:
+	pm_runtime_put(&udc->gadget.dev);
+
+	return retval;
+}
+
+/**
+ * ci13xxx_stop: unregister a gadget driver
+ *
+ * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
+ *
+ * Stops the controller (unless pull-up-on-VBUS is set and VBUS is gone,
+ * in which case it is already stopped), flushes pending activity and
+ * releases the ep0 status request and buffer allocated in ci13xxx_start().
+ */
+static int ci13xxx_stop(struct usb_gadget *gadget)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long flags;
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
+			udc->vbus_active) {
+		hw_device_state(0);
+		/* _gadget_stop_activity() must run without the lock held */
+		spin_unlock_irqrestore(udc->lock, flags);
+		_gadget_stop_activity(&udc->gadget);
+		spin_lock_irqsave(udc->lock, flags);
+	}
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	usb_ep_free_request(&udc->ep0in.ep, udc->status);
+	kfree(udc->status_buf);
+
+	return 0;
+}
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: global interrupt handler
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ *
+ * Returns IRQ_NONE when the controller is in LPM, when the shared
+ * register block is currently in host mode, or when no interrupt
+ * source was active.
+ */
+static irqreturn_t udc_irq(void)
+{
+	struct ci13xxx *udc = _udc;
+	irqreturn_t retval;
+	u32 intr;
+
+	trace();
+
+	if (udc == NULL) {
+		err("ENODEV");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(udc->lock);
+
+	/* registers are not accessible while the link is in LPM */
+	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
+		spin_unlock(udc->lock);
+		return IRQ_NONE;
+	}
+
+	/* shared register block: ignore IRQs raised while in host mode */
+	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
+		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
+				USBMODE_CM_DEVICE) {
+			spin_unlock(udc->lock);
+			return IRQ_NONE;
+		}
+	}
+	intr = hw_test_and_clear_intr_active();
+	if (intr) {
+		/* ring buffer of recent interrupt statuses for debugfs */
+		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
+		isr_statistics.hndl.idx &= ISR_MASK;
+		isr_statistics.hndl.cnt++;
+
+		/* order defines priority - do NOT change it */
+		if (USBi_URI & intr) {
+			isr_statistics.uri++;
+			if (!hw_cread(CAP_PORTSC, PORTSC_PR))
+				pr_info("%s: USB reset interrupt is delayed\n",
+								__func__);
+			isr_reset_handler(udc);
+		}
+		if (USBi_PCI & intr) {
+			isr_statistics.pci++;
+			isr_resume_handler(udc);
+		}
+		if (USBi_UEI & intr)
+			isr_statistics.uei++;
+		if (USBi_UI & intr) {
+			isr_statistics.ui++;
+			isr_tr_complete_handler(udc);
+		}
+		if (USBi_SLI & intr) {
+			isr_suspend_handler(udc);
+			isr_statistics.sli++;
+		}
+		retval = IRQ_HANDLED;
+	} else {
+		isr_statistics.none++;
+		retval = IRQ_NONE;
+	}
+	spin_unlock(udc->lock);
+
+	return retval;
+}
+
+/* Return every endpoint's queue head to the QH DMA pool. */
+static void destroy_eps(struct ci13xxx *ci)
+{
+	struct ci13xxx_ep *ep = ci->ci13xxx_ep;
+	int n;
+
+	for (n = 0; n < hw_ep_max; n++, ep++)
+		dma_pool_free(ci->qh_pool, ep->qh.ptr, ep->qh.dma);
+}
+
+/**
+ * udc_probe: parent probe must call this to initialize UDC
+ * @dev: parent device
+ * @regs: registers base address
+ * @name: driver name
+ *
+ * This function returns an error code
+ * No interrupts active, the IRQ has not been requested yet
+ * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
+ *
+ * Allocates the ci13xxx instance and its QH/TD DMA pools, initializes all
+ * endpoints, optionally grabs the USB2 transceiver, registers the gadget
+ * with the UDC core and publishes the instance through the global _udc.
+ * On failure everything acquired so far is unwound via the label ladder.
+ */
+static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
+		void __iomem *regs)
+{
+	struct ci13xxx *udc;
+	struct ci13xxx_platform_data *pdata;
+	int retval = 0, i, j;
+
+	trace("%pK, %pK, %pK", dev, regs, driver->name);
+
+	if (dev == NULL || regs == NULL || driver == NULL ||
+			driver->name == NULL)
+		return -EINVAL;
+
+	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
+	if (udc == NULL)
+		return -ENOMEM;
+
+	udc->lock = &udc_lock;
+	udc->regs = regs;
+	udc->udc_driver = driver;
+
+	udc->gadget.ops          = &usb_gadget_ops;
+	udc->gadget.speed        = USB_SPEED_UNKNOWN;
+	udc->gadget.max_speed    = USB_SPEED_HIGH;
+	udc->gadget.is_otg       = 0;
+	udc->gadget.name         = driver->name;
+
+	/* alloc resources */
+	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
+				       sizeof(struct ci13xxx_qh),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->qh_pool == NULL) {
+		retval = -ENOMEM;
+		goto free_udc;
+	}
+
+	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
+				       sizeof(struct ci13xxx_td),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->td_pool == NULL) {
+		retval = -ENOMEM;
+		goto free_qh_pool;
+	}
+
+	INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
+
+	retval = hw_device_init(regs);
+	if (retval < 0)
+		goto free_qh_pool;
+
+	/* per-endpoint lists and prime watchdog timers */
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+		INIT_LIST_HEAD(&mEp->ep.ep_list);
+		INIT_LIST_HEAD(&mEp->rw_queue);
+		setup_timer(&mEp->prime_timer, ep_prime_timer_func,
+			(unsigned long) mEp);
+	}
+
+	/*
+	 * Endpoints are laid out as hw_ep_max/2 OUT entries followed by
+	 * hw_ep_max/2 IN entries; k indexes the combined array.
+	 */
+	for (i = 0; i < hw_ep_max/2; i++) {
+		for (j = RX; j <= TX; j++) {
+			int k = i + j * hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
+
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX)  ? "in" : "out");
+
+			mEp->lock         = udc->lock;
+			mEp->device       = &udc->gadget.dev;
+			mEp->td_pool      = udc->td_pool;
+
+			mEp->ep.name      = mEp->name;
+			mEp->ep.ops       = &usb_ep_ops;
+			usb_ep_set_maxpacket_limit(&mEp->ep,
+				k ? USHRT_MAX : CTRL_PAYLOAD_MAX);
+
+			INIT_LIST_HEAD(&mEp->qh.queue);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+					&mEp->qh.dma);
+			if (mEp->qh.ptr == NULL)
+				retval = -ENOMEM;
+			else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/* skip ep0 out and in endpoints */
+			if (i == 0)
+				continue;
+
+			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
+	}
+
+	/* a failed QH allocation above is only acted upon here */
+	if (retval)
+		goto free_dma_pools;
+
+	udc->gadget.ep0 = &udc->ep0in.ep;
+
+	pdata = dev->platform_data;
+	if (pdata) {
+		if (pdata->enable_axi_prefetch)
+			udc->gadget.extra_buf_alloc = EXTRA_ALLOCATION_SIZE;
+	}
+
+	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (udc->transceiver == NULL) {
+			retval = -ENODEV;
+			goto destroy_eps;
+		}
+	}
+
+	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
+		retval = hw_device_reset(udc);
+		if (retval)
+			goto put_transceiver;
+	}
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver->otg,
+						&udc->gadget);
+		if (retval)
+			goto put_transceiver;
+	}
+
+	retval = usb_add_gadget_udc(dev, &udc->gadget);
+	if (retval)
+		goto remove_trans;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	retval = dbg_create_files(&udc->gadget.dev);
+	if (retval) {
+		pr_err("Registering sysfs files for debug failed!!!!\n");
+		goto del_udc;
+	}
+#endif
+
+	pm_runtime_no_callbacks(&udc->gadget.dev);
+	pm_runtime_set_active(&udc->gadget.dev);
+	pm_runtime_enable(&udc->gadget.dev);
+
+	/* Use delayed LPM especially for composition-switch in LPM (suspend) */
+	pm_runtime_set_autosuspend_delay(&udc->gadget.dev, 2000);
+	pm_runtime_use_autosuspend(&udc->gadget.dev);
+
+	_udc = udc;
+	return retval;
+
+del_udc:
+	usb_del_gadget_udc(&udc->gadget);
+remove_trans:
+	/*
+	 * NOTE(review): this passes &udc->gadget rather than NULL to detach
+	 * the peripheral, and the err() below is skipped by direct jumps to
+	 * put_transceiver — confirm both are intentional.
+	 */
+	if (udc->transceiver)
+		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+
+	err("error = %i", retval);
+put_transceiver:
+	if (udc->transceiver)
+		usb_put_phy(udc->transceiver);
+destroy_eps:
+	destroy_eps(udc);
+free_dma_pools:
+	dma_pool_destroy(udc->td_pool);
+free_qh_pool:
+	dma_pool_destroy(udc->qh_pool);
+free_udc:
+	kfree(udc);
+	_udc = NULL;
+	return retval;
+}
+
+/**
+ * udc_remove: parent remove must call this to remove UDC
+ *
+ * No interrupts active, the IRQ has been released
+ *
+ * Tears down everything udc_probe() set up, in reverse order, and clears
+ * the global _udc pointer.
+ */
+static void udc_remove(void)
+{
+	struct ci13xxx *udc = _udc;
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	usb_del_gadget_udc(&udc->gadget);
+
+	if (udc->transceiver) {
+		/* NOTE(review): passes &udc->gadget, not NULL — confirm */
+		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
+		usb_put_phy(udc->transceiver);
+	}
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	dbg_remove_files(&udc->gadget.dev);
+#endif
+	destroy_eps(udc);
+	dma_pool_destroy(udc->td_pool);
+	dma_pool_destroy(udc->qh_pool);
+
+	kfree(udc);
+	_udc = NULL;
+}
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
new file mode 100644
index 0000000..8c93080
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -0,0 +1,282 @@
+/*
+ * ci13xxx_udc.h - structures, registers, and macros MIPS USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description: MIPS USB IP core family device controller
+ * Structures, registers and logging macros
+ */
+
+#ifndef _CI13XXX_h_
+#define _CI13XXX_h_
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
+#define ENDPT_MAX (32)
+#define CTRL_PAYLOAD_MAX (64)
+#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
+#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
+
+/* UDC private data:
+ * 16MSb - Vendor ID | 16 LSb Vendor private data
+ */
+#define CI13XX_REQ_VENDOR_ID(id) (id & 0xFFFF0000UL)
+
+#define MSM_ETD_TYPE BIT(1)
+#define MSM_EP_PIPE_ID_RESET_VAL 0x1F001F
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/*
+ * DMA layout of transfer descriptors (dTD) as read by the controller.
+ * The numbered comments give the 32-bit word offset of each field;
+ * the #defines below each field describe its bit layout.
+ */
+struct ci13xxx_td {
+	/* 0 */
+	u32 next;
+#define TD_TERMINATE          BIT(0)
+#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
+	/* 1 */
+	u32 token;
+#define TD_STATUS             (0x00FFUL <<  0)
+#define TD_STATUS_TR_ERR      BIT(3)
+#define TD_STATUS_DT_ERR      BIT(5)
+#define TD_STATUS_HALTED      BIT(6)
+#define TD_STATUS_ACTIVE      BIT(7)
+#define TD_MULTO              (0x0003UL << 10)
+#define TD_IOC                BIT(15)
+#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
+	/* 2 */
+	u32 page[5];
+#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
+#define TD_FRAME_NUM          (0x07FFUL <<  0)
+#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
+} __packed __aligned(4);
+
+/*
+ * DMA layout of queue heads (dQH); one per endpoint direction.
+ * Embeds a transfer-descriptor overlay (td) and the last SETUP packet.
+ */
+struct ci13xxx_qh {
+	/* 0 */
+	u32 cap;
+#define QH_IOS                BIT(15)
+#define QH_MAX_PKT            (0x07FFUL << 16)
+#define QH_ZLT                BIT(29)
+#define QH_MULT               (0x0003UL << 30)
+#define QH_MULT_SHIFT         11
+	/* 1 */
+	u32 curr;
+	/* 2 - 8 */
+	struct ci13xxx_td        td;
+	/* 9 */
+	u32 RESERVED;
+	struct usb_ctrlrequest   setup;
+} __packed __aligned(4);
+
+/*
+ * Cache of a larger request's original attributes (length, progress and
+ * buffer), saved while the request is handled in smaller pieces.
+ */
+struct ci13xxx_multi_req {
+	unsigned int len;	/* original requested length */
+	unsigned int actual;	/* bytes transferred so far */
+	void *buf;		/* original buffer pointer */
+};
+
+/* Extension of usb_request: per-request TD bookkeeping for this UDC */
+struct ci13xxx_req {
+	struct usb_request   req;	/* embedded generic request */
+	unsigned int         map;	/* non-zero when DMA-mapped here */
+	struct list_head     queue;	/* link in the endpoint's queue */
+	struct ci13xxx_td   *ptr;	/* TD for this request */
+	dma_addr_t           dma;	/* DMA address of that TD */
+	struct ci13xxx_td   *zptr;	/* extra TD for a trailing ZLP */
+	dma_addr_t           zdma;	/* DMA address of the ZLP TD */
+	struct ci13xxx_multi_req multi;	/* original attrs of split request */
+};
+
+/* Extension of usb_ep: hardware queue head plus per-endpoint state */
+struct ci13xxx_ep {
+	struct usb_ep                          ep;
+	const struct usb_endpoint_descriptor  *desc;
+	u8                                     dir;	/* RX or TX */
+	u8                                     num;	/* hw endpoint number */
+	u8                                     type;	/* transfer type */
+	char                                   name[16];
+	struct {
+		struct list_head   queue;	/* pending ci13xxx_req list */
+		struct ci13xxx_qh *ptr;		/* queue head (DMA memory) */
+		dma_addr_t         dma;
+	}                                      qh;
+	struct list_head                       rw_queue;
+	int                                    wedge;
+
+	/* global resources */
+	spinlock_t                            *lock;
+	struct device                         *device;
+	struct dma_pool                       *td_pool;
+	struct ci13xxx_td                     *last_zptr;
+	dma_addr_t                            last_zdma;
+	/* debug counters for dTD update/prime failures */
+	unsigned long dTD_update_fail_count;
+	unsigned long dTD_active_re_q_count;
+	unsigned long prime_fail_count;
+	int prime_timer_count;
+	struct timer_list prime_timer;
+
+	bool multi_req;		/* request is being handled in pieces */
+};
+
+struct ci13xxx;
+/*
+ * Platform glue description: capability flags, controller event
+ * notifications and an LPM query supplied by the parent driver.
+ */
+struct ci13xxx_udc_driver {
+	const char	*name;
+	unsigned long	 flags;
+	unsigned int nz_itc;	/* non-zero interrupt threshold control */
+#define CI13XXX_REGS_SHARED		BIT(0)
+#define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
+#define CI13XXX_PULLUP_ON_VBUS		BIT(2)
+#define CI13XXX_DISABLE_STREAMING	BIT(3)
+#define CI13XXX_ZERO_ITC		BIT(4)
+/* NOTE(review): BIT(5) is skipped here — confirm that is intentional */
+#define CI13XXX_ENABLE_AHB2AHB_BYPASS	BIT(6)
+
+#define CI13XXX_CONTROLLER_RESET_EVENT			0
+#define CI13XXX_CONTROLLER_CONNECT_EVENT		1
+#define CI13XXX_CONTROLLER_SUSPEND_EVENT		2
+#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT		3
+#define CI13XXX_CONTROLLER_RESUME_EVENT			4
+#define CI13XXX_CONTROLLER_DISCONNECT_EVENT		5
+#define CI13XXX_CONTROLLER_UDC_STARTED_EVENT		6
+#define CI13XXX_CONTROLLER_ERROR_EVENT			7
+
+	void	(*notify_event)(struct ci13xxx *udc, unsigned int event);
+	bool	(*in_lpm)(struct ci13xxx *udc);
+};
+
+/* CI13XXX UDC descriptor & global resources */
+struct ci13xxx {
+	spinlock_t		  *lock;      /* ctrl register bank access */
+	void __iomem              *regs;      /* registers address space */
+
+	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
+	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+	struct usb_request        *status;    /* ep0 status request */
+	void                      *status_buf;/* GET_STATUS buffer */
+
+	struct usb_gadget          gadget;     /* USB slave device */
+	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+	u32                        ep0_dir;    /* ep0 direction */
+/* ep0out/ep0in alias entries of ci13xxx_ep[]: OUT first, IN halfway in */
+#define ep0out ci13xxx_ep[0]
+#define ep0in  ci13xxx_ep[hw_ep_max / 2]
+	u8                         suspended;  /* suspended by the host */
+	u8                         configured; /* is device configured */
+	u8                         test_mode;  /* the selected test mode */
+	bool                       rw_pending; /* Remote wakeup pending flag */
+	struct delayed_work        rw_work;    /* remote wakeup delayed work */
+	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
+	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
+	int                        vbus_active; /* is VBUS active */
+	int                        softconnect; /* is pull-up enable allowed */
+	unsigned long dTD_update_fail_count;
+	struct usb_phy            *transceiver; /* Transceiver struct */
+	bool                      skip_flush; /*
+					       * skip flushing remaining EP
+					       * upon flush timeout for the
+					       * first EP.
+					       */
+};
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS (32)
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN (0x1F << 0)
+#define DCCPARAMS_DC BIT(7)
+
+/* TESTMODE */
+#define TESTMODE_FORCE BIT(0)
+
+/* AHB_MODE */
+#define AHB2AHB_BYPASS BIT(31)
+
+/* USBCMD */
+#define USBCMD_RS BIT(0)
+#define USBCMD_RST BIT(1)
+#define USBCMD_SUTW BIT(13)
+#define USBCMD_ATDTW BIT(14)
+
+/* USBSTS & USBINTR */
+#define USBi_UI BIT(0)
+#define USBi_UEI BIT(1)
+#define USBi_PCI BIT(2)
+#define USBi_URI BIT(6)
+#define USBi_SLI BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA BIT(24)
+#define DEVICEADDR_USBADR (0x7FUL << 25)
+
+/* PORTSC */
+#define PORTSC_FPR BIT(6)
+#define PORTSC_SUSP BIT(7)
+#define PORTSC_PR BIT(8)
+#define PORTSC_HSP BIT(9)
+#define PORTSC_PTC (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD (0x03UL << 25)
+#define DEVLC_PSPD_HS (0x02UL << 25)
+
+/* USBMODE */
+#define USBMODE_CM (0x03UL << 0)
+#define USBMODE_CM_IDLE (0x00UL << 0)
+#define USBMODE_CM_DEVICE (0x02UL << 0)
+#define USBMODE_CM_HOST (0x03UL << 0)
+#define USBMODE_SLOM BIT(3)
+#define USBMODE_SDIS BIT(4)
+#define USBCMD_ITC(n) (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
+#define USBCMD_ITC_MASK (0xFF << 16)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS BIT(0)
+#define ENDPTCTRL_RXT (0x03UL << 2)
+#define ENDPTCTRL_RXR BIT(6) /* reserved for port 0 */
+#define ENDPTCTRL_RXE BIT(7)
+#define ENDPTCTRL_TXS BIT(16)
+#define ENDPTCTRL_TXT (0x03UL << 18)
+#define ENDPTCTRL_TXR BIT(22) /* reserved for port 0 */
+#define ENDPTCTRL_TXE BIT(23)
+
+/******************************************************************************
+ * LOGGING
+ *****************************************************************************/
+#define ci13xxx_printk(level, format, args...) \
+do { \
+ if (_udc == NULL) \
+ printk(level "[%s] " format "\n", __func__, ## args); \
+ else \
+ dev_printk(level, _udc->gadget.dev.parent, \
+ "[%s] " format "\n", __func__, ## args); \
+} while (0)
+
+#ifndef err
+#define err(format, args...) ci13xxx_printk(KERN_ERR, format, ## args)
+#endif
+
+#define warn(format, args...) ci13xxx_printk(KERN_WARNING, format, ## args)
+#define info(format, args...) ci13xxx_printk(KERN_INFO, format, ## args)
+
+#ifdef TRACE
+#define trace(format, args...) ci13xxx_printk(KERN_DEBUG, format, ## args)
+#define dbg_trace(format, args...) dev_dbg(dev, format, ##args)
+#else
+#define trace(format, args...) do {} while (0)
+#define dbg_trace(format, args...) do {} while (0)
+#endif
+
+#endif /* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index fd49fc4..8b481da 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1187,7 +1187,7 @@
ret = __ffs_epfile_read_data(epfile, data, ep->status,
&io_data->data);
goto error_mutex;
- } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+ } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
} else {
req->buf = data;
@@ -3705,6 +3705,11 @@
{
struct ffs_function *func = ffs_func_from_usb(f);
+ if (!test_bit(FFS_FL_BOUND, &func->ffs->flags)) {
+ ffs_log("ffs function do not bind yet.\n");
+ return false;
+ }
+
if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
return false;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 658b8da..243febf 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -389,6 +389,20 @@
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_CI13XXX_MSM
+ tristate "MIPS USB CI13xxx for MSM"
+ select USB_MSM_OTG
+ help
+ MSM SoC has chipidea USB controller. This driver uses
+ ci13xxx_udc core.
+ This driver depends on OTG driver for PHY initialization,
+ clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout which
+	  have an external PHY.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "ci13xxx_msm" and force all
+ gadget drivers to also be dynamically linked.
#
# LAST -- dummy/emulated controller
#
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 1633d4a..26bfa73 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -192,8 +192,8 @@
void usb_ep_free_request(struct usb_ep *ep,
struct usb_request *req)
{
- ep->ops->free_request(ep, req);
trace_usb_ep_free_request(ep, req, 0);
+ ep->ops->free_request(ep, req);
}
EXPORT_SYMBOL_GPL(usb_ep_free_request);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b1b74ff..f76d347 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -150,7 +150,7 @@
To compile this driver as a module, choose M here: the
module will be called phy-isp1301.
-config USB_MSM_OTG
+config QCOM_USB_MSM_OTG
tristate "Qualcomm on-chip USB OTG controller support"
depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
@@ -158,13 +158,12 @@
depends on EXTCON
select USB_PHY
help
- Enable this to support the USB OTG transceiver on Qualcomm chips. It
- handles PHY initialization, clock management, and workarounds
- required after resetting the hardware and power management.
- This driver is required even for peripheral only or host only
- mode configurations.
- This driver is not supported on boards like trout which
- has an external PHY.
+ Enable this to support the USB OTG transceiver on Qualcomm development
+ boards. It handles PHY initialization, clock management, and
+ workarounds required after resetting the hardware and power
+ management. This driver is required even for peripheral only or host
+ only mode configurations. This driver is not supported on boards like
+ trout which has an external PHY.
config USB_QCOM_8X16_PHY
tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
@@ -254,4 +253,17 @@
the high-speed PHY which is usually paired with either the ChipIdea or
Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the
PHY with a dedicated register I/O memory region.
+
+config USB_MSM_OTG
+ tristate "Qualcomm on-chip USB OTG controller support"
+ depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
+ select USB_PHY
+ help
+ Enable this to support the USB OTG transceiver on Qualcomm chips. It
+ handles PHY initialization, clock management, and workarounds
+ required after resetting the hardware and power management.
+ This driver is required even for peripheral only or host only
+ mode configurations.
+ This driver is not supported on boards like trout which
+ has an external PHY.
endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 5b748a6..7e9ffa0 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -21,7 +21,7 @@
obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
-obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
+obj-$(CONFIG_QCOM_USB_MSM_OTG) += phy-msm-qcom.o
obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
@@ -31,3 +31,4 @@
obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o
obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o phy-msm-qusb-v2.o
obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o
+obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
diff --git a/drivers/usb/phy/phy-msm-qcom.c b/drivers/usb/phy/phy-msm-qcom.c
new file mode 100644
index 0000000..8a34759
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qcom.c
@@ -0,0 +1,2162 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/extcon.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reboot.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/usb/otg.h>
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/of.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/regulator/consumer.h>
+
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
+ * only configuration.
+ * OTG_PHY_CONTROL Id/VBUS notifications comes form USB PHY.
+ * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware.
+ * OTG_USER_CONTROL	Id/VBUS notifications come from the user via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY Synopsis 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_45NM_INTEGRATED_PHY,
+ SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
+ *	"do not overwrite default value at this address".
+ * @phy_init_sz: PHY configuration sequence size.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ */
+struct msm_otg_platform_data {
+ int *phy_init_seq;
+ int phy_init_sz;
+ void (*vbus_power)(bool on);
+ unsigned power_budget;
+ enum usb_dr_mode mode;
+ enum otg_control_type otg_control;
+ enum msm_usb_phy_type phy_type;
+ void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_usb_cable - structure for external connector cable
+ * state tracking
+ * @nb: hold event notification callback
+ * @conn: used for notification registration
+ */
+struct msm_usb_cable {
+ struct notifier_block nb;
+ struct extcon_dev *extcon;
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs(Id, SessValid etc).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ * @cur_power: The amount of mA available from downstream port.
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @dcd_retries: The retry count used to track Data contact
+ * detection process.
+ * @manual_pullup: true if VBUS is not routed to USB controller/phy
+ * and controller driver therefore enables pull-up explicitly before
+ * starting controller using usbcmd run/stop bit.
+ * @vbus: VBUS signal state tracking, using extcon framework
+ * @id: ID signal state tracking, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ void __iomem *regs;
+#define ID 0
+#define B_SESS_VLD 1
+ unsigned long inputs;
+ struct work_struct sm_work;
+ atomic_t in_lpm;
+ int async_int;
+ unsigned cur_power;
+ int phy_number;
+ struct delayed_work chg_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ u8 dcd_retries;
+ struct regulator *v3p3;
+ struct regulator *v1p8;
+ struct regulator *vddcx;
+
+ struct reset_control *phy_rst;
+ struct reset_control *link_rst;
+ int vdd_levels[3];
+
+ bool manual_pullup;
+
+ struct msm_usb_cable vbus;
+ struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
+};
+
+#define MSM_USB_BASE (motg->regs)
+#define DRIVER_NAME "msm_otg"
+
+#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
+#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
+
+#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */
+#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */
+#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */
+#define USB_PHY_3P3_LPM_LOAD 4000 /* uA */
+
+#define USB_PHY_1P8_VOL_MIN 1800000 /* uV */
+#define USB_PHY_1P8_VOL_MAX 1800000 /* uV */
+#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */
+#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */
+
+#define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */
+#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
+#define USB_PHY_SUSP_DIG_VOL 500000 /* uV */
+
+enum vdd_levels {
+ VDD_LEVEL_NONE = 0,
+ VDD_LEVEL_MIN,
+ VDD_LEVEL_MAX,
+};
+
+/*
+ * msm_hsusb_init_vddcx - configure and enable (init != 0) or relax and
+ * disable (init == 0) the digital core supply (vddcx) used by the PHY.
+ * Voltage bounds come from motg->vdd_levels[].  Returns 0 or a negative
+ * regulator-API error.
+ */
+static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
+{
+	int ret = 0;
+
+	if (init) {
+		ret = regulator_set_voltage(motg->vddcx,
+				motg->vdd_levels[VDD_LEVEL_MIN],
+				motg->vdd_levels[VDD_LEVEL_MAX]);
+		if (ret) {
+			dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
+			return ret;
+		}
+
+		ret = regulator_enable(motg->vddcx);
+		if (ret)
+			dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n");
+	} else {
+		/* drop the floor to 0 before disabling the supply */
+		ret = regulator_set_voltage(motg->vddcx, 0,
+				motg->vdd_levels[VDD_LEVEL_MAX]);
+		if (ret)
+			dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
+		/*
+		 * NOTE(review): a set_voltage error above is overwritten by
+		 * the regulator_disable() result — confirm that is intended.
+		 */
+		ret = regulator_disable(motg->vddcx);
+		if (ret)
+			dev_err(motg->phy.dev, "unable to disable hsusb vddcx\n");
+	}
+
+	return ret;
+}
+
+/*
+ * msm_hsusb_ldo_init - bring up (init != 0) or shut down (init == 0) the
+ * PHY analog supplies: 3.3 V (v3p3) first, then 1.8 V (v1p8).
+ *
+ * The teardown labels double as the init-failure unwind path: the
+ * init == 0 case simply falls through regulator_disable(v1p8) and
+ * disable_3p3, releasing both supplies in reverse order.
+ */
+static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
+{
+	int rc = 0;
+
+	if (init) {
+		rc = regulator_set_voltage(motg->v3p3, USB_PHY_3P3_VOL_MIN,
+				USB_PHY_3P3_VOL_MAX);
+		if (rc) {
+			dev_err(motg->phy.dev, "Cannot set v3p3 voltage\n");
+			goto exit;
+		}
+		rc = regulator_enable(motg->v3p3);
+		if (rc) {
+			dev_err(motg->phy.dev, "unable to enable the hsusb 3p3\n");
+			goto exit;
+		}
+		rc = regulator_set_voltage(motg->v1p8, USB_PHY_1P8_VOL_MIN,
+				USB_PHY_1P8_VOL_MAX);
+		if (rc) {
+			dev_err(motg->phy.dev, "Cannot set v1p8 voltage\n");
+			goto disable_3p3;
+		}
+		rc = regulator_enable(motg->v1p8);
+		if (rc) {
+			dev_err(motg->phy.dev, "unable to enable the hsusb 1p8\n");
+			goto disable_3p3;
+		}
+
+		return 0;
+	}
+
+	regulator_disable(motg->v1p8);
+disable_3p3:
+	regulator_disable(motg->v3p3);
+exit:
+	return rc;
+}
+
+/*
+ * Request high-power (on != 0) or low-power (on == 0) load on both PHY
+ * LDOs. On HPM failure of v3p3 the v1p8 request is rolled back to LPM.
+ * Returns 0 on success, otherwise the last negative set_load result.
+ */
+static int msm_hsusb_ldo_set_mode(struct msm_otg *motg, int on)
+{
+ int ret = 0;
+
+ if (on) {
+ ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("Could not set HPM for v1p8\n");
+ return ret;
+ }
+ ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("Could not set HPM for v3p3\n");
+ regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
+ return ret;
+ }
+ } else {
+ ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
+ if (ret < 0)
+ pr_err("Could not set LPM for v1p8\n");
+ ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_LPM_LOAD);
+ if (ret < 0)
+ pr_err("Could not set LPM for v3p3\n");
+ }
+
+ pr_debug("reg (%s)\n", on ? "HPM" : "LPM");
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * Read one ULPI PHY register through the controller's viewport.
+ * Busy-waits (1 us steps) for the RUN bit to clear, up to
+ * ULPI_IO_TIMEOUT_USEC. Returns the register value or -ETIMEDOUT.
+ */
+static int ulpi_read(struct usb_phy *phy, u32 reg)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ int cnt = 0;
+
+ /* initiate read operation */
+ writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+ USB_ULPI_VIEWPORT);
+
+ /* wait for completion */
+ while (cnt < ULPI_IO_TIMEOUT_USEC) {
+ if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+ break;
+ udelay(1);
+ cnt++;
+ }
+
+ if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+ dev_err(phy->dev, "ulpi_read: timeout %08x\n",
+ readl(USB_ULPI_VIEWPORT));
+ return -ETIMEDOUT;
+ }
+ return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+}
+
+/*
+ * Write one ULPI PHY register through the controller's viewport.
+ * Same polling scheme as ulpi_read(). Returns 0 or -ETIMEDOUT.
+ */
+static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ int cnt = 0;
+
+ /* initiate write operation */
+ writel(ULPI_RUN | ULPI_WRITE |
+ ULPI_ADDR(reg) | ULPI_DATA(val),
+ USB_ULPI_VIEWPORT);
+
+ /* wait for completion */
+ while (cnt < ULPI_IO_TIMEOUT_USEC) {
+ if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+ break;
+ udelay(1);
+ cnt++;
+ }
+
+ if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+ dev_err(phy->dev, "ulpi_write: timeout\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/* usb_phy register-access ops, exported via phy->io_ops. */
+static struct usb_phy_io_ops msm_otg_io_ops = {
+ .read = ulpi_read,
+ .write = ulpi_write,
+};
+
+/*
+ * Apply the board-supplied PHY override sequence: entry idx is written
+ * to ULPI register (ULPI_EXT_VENDOR_SPECIFIC + idx); entries of -1 are
+ * skipped. Write failures are not checked (best-effort tuning values).
+ */
+static void ulpi_init(struct msm_otg *motg)
+{
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ int *seq = pdata->phy_init_seq, idx;
+ u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
+
+ for (idx = 0; idx < pdata->phy_init_sz; idx++) {
+ if (seq[idx] == -1)
+ continue;
+
+ dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
+ seq[idx], addr + idx);
+ ulpi_write(&motg->phy, seq[idx], addr + idx);
+ }
+}
+
+/*
+ * usb_phy notify_disconnect hook: clears the manual VBUS-valid override
+ * bits (when in use) and places the transceiver in non-driving opmode
+ * so the host can observe the soft disconnect. Always returns 0.
+ */
+static int msm_phy_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ int val;
+
+ if (motg->manual_pullup) {
+ val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
+ usb_phy_io_write(phy, val, ULPI_CLR(ULPI_MISC_A));
+ }
+
+ /*
+ * Put the transceiver in non-driving mode. Otherwise host
+ * may not detect soft-disconnection.
+ */
+ val = ulpi_read(phy, ULPI_FUNC_CTRL);
+ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, val, ULPI_FUNC_CTRL);
+
+ return 0;
+}
+
+/* Assert or deassert the USB link block reset line; logs on failure. */
+static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
+{
+ int ret;
+
+ if (assert)
+ ret = reset_control_assert(motg->link_rst);
+ else
+ ret = reset_control_deassert(motg->link_rst);
+
+ if (ret)
+ dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
+ assert ? "assert" : "deassert");
+
+ return ret;
+}
+
+/* Pulse the PHY reset control, if one was provided; no-op otherwise. */
+static int msm_otg_phy_clk_reset(struct msm_otg *motg)
+{
+ int ret = 0;
+
+ if (motg->phy_rst)
+ ret = reset_control_reset(motg->phy_rst);
+
+ if (ret)
+ dev_err(motg->phy.dev, "usb phy clk reset failed\n");
+
+ return ret;
+}
+
+/*
+ * Hard-reset the USB link block (assert, 1 ms, deassert), re-apply the
+ * secondary-PHY select bit, and park the transceiver in serial mode as
+ * required after reset. Returns 0 or the failing reset call's errno.
+ */
+static int msm_link_reset(struct msm_otg *motg)
+{
+ u32 val;
+ int ret;
+
+ ret = msm_otg_link_clk_reset(motg, 1);
+ if (ret)
+ return ret;
+
+ /* wait for 1ms delay as suggested in HPG. */
+ usleep_range(1000, 1200);
+
+ ret = msm_otg_link_clk_reset(motg, 0);
+ if (ret)
+ return ret;
+
+ /* BIT(16) in PHY_CTRL2 selects the secondary PHY */
+ if (motg->phy_number)
+ writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+
+ /* put transceiver in serial mode as part of reset */
+ val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
+ writel(val | PORTSC_PTS_SERIAL, USB_PORTSC);
+
+ return 0;
+}
+
+/*
+ * Controller soft-reset via USBCMD_RESET, polled up to
+ * LINK_RESET_TIMEOUT_USEC, then reprogram PORTSC (ULPI PHY select),
+ * AHB burst/mode and the secondary-PHY select bit.
+ * Returns 0 or -ETIMEDOUT.
+ */
+static int msm_otg_reset(struct usb_phy *phy)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ int cnt = 0;
+
+ writel(USBCMD_RESET, USB_USBCMD);
+ while (cnt < LINK_RESET_TIMEOUT_USEC) {
+ if (!(readl(USB_USBCMD) & USBCMD_RESET))
+ break;
+ udelay(1);
+ cnt++;
+ }
+ if (cnt >= LINK_RESET_TIMEOUT_USEC)
+ return -ETIMEDOUT;
+
+ /* select ULPI phy and clear other status/control bits in PORTSC */
+ writel(PORTSC_PTS_ULPI, USB_PORTSC);
+
+ writel(0x0, USB_AHBBURST);
+ writel(0x08, USB_AHBMODE);
+
+ if (motg->phy_number)
+ writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+ return 0;
+}
+
+/*
+ * Power-on-reset the PHY. Non-28nm PHYs use the external reset control;
+ * the SNPS 28nm integrated PHY is reset by pulsing PHY_POR_ASSERT in
+ * the per-PHY control register.
+ */
+static void msm_phy_reset(struct msm_otg *motg)
+{
+ void __iomem *addr;
+
+ if (motg->pdata->phy_type != SNPS_28NM_INTEGRATED_PHY) {
+ msm_otg_phy_clk_reset(motg);
+ return;
+ }
+
+ addr = USB_PHY_CTRL;
+ if (motg->phy_number)
+ addr = USB_PHY_CTRL2;
+
+ /* Assert USB PHY_POR */
+ writel(readl(addr) | PHY_POR_ASSERT, addr);
+
+ /*
+ * wait for minimum 10 microseconds as suggested in HPG.
+ * Use a slightly larger value since the exact value didn't
+ * work 100% of the time.
+ */
+ udelay(12);
+
+ /* Deassert USB PHY_POR */
+ writel(readl(addr) & ~PHY_POR_ASSERT, addr);
+}
+
+/*
+ * Full link + controller + PHY reset sequence. core_clk (when present)
+ * is enabled for the duration and released on EVERY exit path — the
+ * original returned early on error with the clock still prepared,
+ * leaking an enable count. The two error messages were also swapped
+ * (link failure reported as "phy_reset", controller failure as "link").
+ */
+static int msm_usb_reset(struct usb_phy *phy)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ int ret;
+
+ if (!IS_ERR(motg->core_clk))
+ clk_prepare_enable(motg->core_clk);
+
+ ret = msm_link_reset(motg);
+ if (ret) {
+ dev_err(phy->dev, "link reset failed\n");
+ goto out;
+ }
+
+ ret = msm_otg_reset(&motg->phy);
+ if (ret) {
+ dev_err(phy->dev, "usb reset failed\n");
+ goto out;
+ }
+
+ msleep(100);
+
+ /* Reset USB PHY after performing USB Link RESET */
+ msm_phy_reset(motg);
+
+out:
+ if (!IS_ERR(motg->core_clk))
+ clk_disable_unprepare(motg->core_clk);
+
+ return ret;
+}
+
+/*
+ * usb_phy init hook: program the ULPI override sequence, reset the PHY,
+ * enable OTG/BSV interrupts when the PHY controls OTG, configure the
+ * manual-pullup (external VBUS-valid) path, and select the secondary
+ * PHY if applicable. Always returns 0.
+ */
+static int msm_phy_init(struct usb_phy *phy)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ u32 val, ulpi_val = 0;
+
+ /* Program USB PHY Override registers. */
+ ulpi_init(motg);
+
+ /*
+ * It is recommended in HPG to reset USB PHY after programming
+ * USB PHY Override registers.
+ */
+ msm_phy_reset(motg);
+
+ if (pdata->otg_control == OTG_PHY_CONTROL) {
+ val = readl(USB_OTGSC);
+ if (pdata->mode == USB_DR_MODE_OTG) {
+ /* OTG: watch both ID-pin and B-session-valid */
+ ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
+ val |= OTGSC_IDIE | OTGSC_BSVIE;
+ } else if (pdata->mode == USB_DR_MODE_PERIPHERAL) {
+ ulpi_val = ULPI_INT_SESS_VALID;
+ val |= OTGSC_BSVIE;
+ }
+ writel(val, USB_OTGSC);
+ ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
+ ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
+ }
+
+ if (motg->manual_pullup) {
+ val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
+ ulpi_write(phy, val, ULPI_SET(ULPI_MISC_A));
+
+ val = readl(USB_GENCONFIG_2);
+ val |= GENCONFIG_2_SESS_VLD_CTRL_EN;
+ writel(val, USB_GENCONFIG_2);
+
+ val = readl(USB_USBCMD);
+ val |= USBCMD_SESS_VLD_CTRL;
+ writel(val, USB_USBCMD);
+
+ val = ulpi_read(phy, ULPI_FUNC_CTRL);
+ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, val, ULPI_FUNC_CTRL);
+ }
+
+ if (motg->phy_number)
+ writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+
+ return 0;
+}
+
+#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
+#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
+
+#ifdef CONFIG_PM
+
+/*
+ * Raise (high != 0) or lower (high == 0) the VDDCX minimum corner while
+ * keeping the same maximum, used around LPM entry/exit.
+ */
+static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high)
+{
+ int max_vol = motg->vdd_levels[VDD_LEVEL_MAX];
+ int min_vol;
+ int ret;
+
+ if (high)
+ min_vol = motg->vdd_levels[VDD_LEVEL_MIN];
+ else
+ min_vol = motg->vdd_levels[VDD_LEVEL_NONE];
+
+ ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol);
+ if (ret) {
+ pr_err("Cannot set vddcx voltage\n");
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+
+ return ret;
+}
+
+/*
+ * Enter low power mode: quiesce the PHY (Chipidea 45nm quirks), poll
+ * PORTSC_PHCD for PHY suspend, block ULPI traffic, optionally drop PHY
+ * retention/LDO modes, and gate the clocks. The IRQ is masked for the
+ * whole sequence to avoid racing msm_otg_irq(). Idempotent via in_lpm.
+ * Returns 0, or -ETIMEDOUT if the PHY refused to suspend (in which
+ * case the controller is reset and LPM is NOT entered).
+ */
+static int msm_otg_suspend(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ struct usb_bus *bus = phy->otg->host;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ void __iomem *addr;
+ int cnt = 0;
+
+ if (atomic_read(&motg->in_lpm))
+ return 0;
+
+ disable_irq(motg->irq);
+ /*
+ * Chipidea 45-nm PHY suspend sequence:
+ *
+ * Interrupt Latch Register auto-clear feature is not present
+ * in all PHY versions. Latch register is clear on read type.
+ * Clear latch register to avoid spurious wakeup from
+ * low power mode (LPM).
+ *
+ * PHY comparators are disabled when PHY enters into low power
+ * mode (LPM). Keep PHY comparators ON in LPM only when we expect
+ * VBUS/Id notifications from USB PHY. Otherwise turn off USB
+ * PHY comparators. This save significant amount of power.
+ *
+ * PLL is not turned off when PHY enters into low power mode (LPM).
+ * Disable PLL for maximum power savings.
+ */
+
+ if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
+ ulpi_read(phy, 0x14);
+ if (pdata->otg_control == OTG_PHY_CONTROL)
+ ulpi_write(phy, 0x01, 0x30);
+ ulpi_write(phy, 0x08, 0x09);
+ }
+
+ /*
+ * PHY may take some time or even fail to enter into low power
+ * mode (LPM). Hence poll for 500 msec and reset the PHY and link
+ * in failure case.
+ */
+ writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+ while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
+ if (readl(USB_PORTSC) & PORTSC_PHCD)
+ break;
+ udelay(1);
+ cnt++;
+ }
+
+ if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
+ dev_err(phy->dev, "Unable to suspend PHY\n");
+ msm_otg_reset(phy);
+ enable_irq(motg->irq);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * PHY has capability to generate interrupt asynchronously in low
+ * power mode (LPM). This interrupt is level triggered. So USB IRQ
+ * line must be disabled till async interrupt enable bit is cleared
+ * in USBCMD register. Assert STP (ULPI interface STOP signal) to
+ * block data communication from PHY.
+ */
+ writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
+
+ addr = USB_PHY_CTRL;
+ if (motg->phy_number)
+ addr = USB_PHY_CTRL2;
+
+ if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
+ motg->pdata->otg_control == OTG_PMIC_CONTROL)
+ writel(readl(addr) | PHY_RETEN, addr);
+
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->clk);
+ if (!IS_ERR(motg->core_clk))
+ clk_disable_unprepare(motg->core_clk);
+
+ if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
+ motg->pdata->otg_control == OTG_PMIC_CONTROL) {
+ msm_hsusb_ldo_set_mode(motg, 0);
+ msm_hsusb_config_vddcx(motg, 0);
+ }
+
+ if (device_may_wakeup(phy->dev))
+ enable_irq_wake(motg->irq);
+ if (bus)
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+ atomic_set(&motg->in_lpm, 1);
+ enable_irq(motg->irq);
+
+ dev_info(phy->dev, "USB in low power mode\n");
+
+ return 0;
+}
+
+/*
+ * Exit low power mode: ungate clocks, restore LDO/VDDCX modes and PHY
+ * retention, clear the async-interrupt and ULPI-STP gates, then bring
+ * the PHY out of PHCD (polled). A PHY that refuses to resume is a
+ * fatal condition — the controller is reset and the user must replug.
+ * Balances the pm_runtime_get() taken in msm_otg_irq() when the wake
+ * came from an async interrupt. Idempotent via in_lpm. Returns 0.
+ */
+static int msm_otg_resume(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ struct usb_bus *bus = phy->otg->host;
+ void __iomem *addr;
+ int cnt = 0;
+ unsigned temp;
+
+ if (!atomic_read(&motg->in_lpm))
+ return 0;
+
+ clk_prepare_enable(motg->pclk);
+ clk_prepare_enable(motg->clk);
+ if (!IS_ERR(motg->core_clk))
+ clk_prepare_enable(motg->core_clk);
+
+ if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
+ motg->pdata->otg_control == OTG_PMIC_CONTROL) {
+
+ addr = USB_PHY_CTRL;
+ if (motg->phy_number)
+ addr = USB_PHY_CTRL2;
+
+ msm_hsusb_ldo_set_mode(motg, 1);
+ msm_hsusb_config_vddcx(motg, 1);
+ writel(readl(addr) & ~PHY_RETEN, addr);
+ }
+
+ temp = readl(USB_USBCMD);
+ temp &= ~ASYNC_INTR_CTRL;
+ temp &= ~ULPI_STP_CTRL;
+ writel(temp, USB_USBCMD);
+
+ /*
+ * PHY comes out of low power mode (LPM) in case of wakeup
+ * from asynchronous interrupt.
+ */
+ if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+ goto skip_phy_resume;
+
+ writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+ while (cnt < PHY_RESUME_TIMEOUT_USEC) {
+ if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+ break;
+ udelay(1);
+ cnt++;
+ }
+
+ if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
+ /*
+ * This is a fatal error. Reset the link and
+ * PHY. USB state can not be restored. Re-insertion
+ * of USB cable is the only way to get USB working.
+ */
+ dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n");
+ msm_otg_reset(phy);
+ }
+
+skip_phy_resume:
+ if (device_may_wakeup(phy->dev))
+ disable_irq_wake(motg->irq);
+ if (bus)
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+ atomic_set(&motg->in_lpm, 0);
+
+ if (motg->async_int) {
+ /* pair with pm_runtime_get()/disable_irq_nosync() in the ISR */
+ motg->async_int = 0;
+ pm_runtime_put(phy->dev);
+ enable_irq(motg->irq);
+ }
+
+ dev_info(phy->dev, "USB exited from low power mode\n");
+
+ return 0;
+}
+#endif
+
+/* Record (and log) a new available-current value; no-op if unchanged. */
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA)
+{
+ if (motg->cur_power == mA)
+ return;
+
+ /* TODO: Notify PMIC about available current */
+ dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
+ motg->cur_power = mA;
+}
+
+/*
+ * usb_phy set_power hook, called by the gadget stack on suspend /
+ * configuration changes. Only forwarded for SDP chargers; DCP/CDP
+ * current budgets are fixed elsewhere. Always returns 0.
+ */
+static int msm_otg_set_power(struct usb_phy *phy, unsigned mA)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+
+ /*
+ * Gadget driver uses set_power method to notify about the
+ * available current based on suspend/configured states.
+ *
+ * IDEV_CHG can be drawn irrespective of suspend/un-configured
+ * states when CDP/ACA is connected.
+ */
+ if (motg->chg_type == USB_SDP_CHARGER)
+ msm_otg_notify_charger(motg, mA);
+
+ return 0;
+}
+
+/*
+ * Start (on != 0) or stop host mode: toggles board VBUS supply and GPIO
+ * hub switch via platform callbacks, and adds/removes the HCD. No-op if
+ * no host controller has been registered yet.
+ */
+static void msm_otg_start_host(struct usb_phy *phy, int on)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ struct usb_hcd *hcd;
+
+ if (!phy->otg->host)
+ return;
+
+ hcd = bus_to_hcd(phy->otg->host);
+
+ if (on) {
+ dev_dbg(phy->dev, "host on\n");
+
+ if (pdata->vbus_power)
+ pdata->vbus_power(1);
+ /*
+ * Some boards have a switch cotrolled by gpio
+ * to enable/disable internal HUB. Enable internal
+ * HUB before kicking the host.
+ */
+ if (pdata->setup_gpio)
+ pdata->setup_gpio(OTG_STATE_A_HOST);
+#ifdef CONFIG_USB
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ device_wakeup_enable(hcd->self.controller);
+#endif
+ } else {
+ dev_dbg(phy->dev, "host off\n");
+
+#ifdef CONFIG_USB
+ usb_remove_hcd(hcd);
+#endif
+ if (pdata->setup_gpio)
+ pdata->setup_gpio(OTG_STATE_UNDEFINED);
+ if (pdata->vbus_power)
+ pdata->vbus_power(0);
+ }
+}
+
+/*
+ * usb_otg set_host hook. Registration (host != NULL) stores the bus and
+ * kicks the state machine; deregistration tears down an active A_HOST
+ * session first. Returns -ENODEV on peripheral-only boards, else 0.
+ */
+static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+ struct usb_hcd *hcd;
+
+ /*
+ * Fail host registration if this board can support
+ * only peripheral configuration.
+ */
+ if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) {
+ dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
+ return -ENODEV;
+ }
+
+ if (!host) {
+ if (otg->state == OTG_STATE_A_HOST) {
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_start_host(otg->usb_phy, 0);
+ otg->host = NULL;
+ otg->state = OTG_STATE_UNDEFINED;
+ schedule_work(&motg->sm_work);
+ } else {
+ otg->host = NULL;
+ }
+
+ return 0;
+ }
+
+ hcd = bus_to_hcd(host);
+ hcd->power_budget = motg->pdata->power_budget;
+
+ otg->host = host;
+ dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
+
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
+
+ return 0;
+}
+
+/*
+ * Start (on != 0) or stop peripheral mode by signalling VBUS state to
+ * the registered gadget; also flips the board hub-switch GPIO callback.
+ * No-op if no gadget has been registered yet.
+ */
+static void msm_otg_start_peripheral(struct usb_phy *phy, int on)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ if (!phy->otg->gadget)
+ return;
+
+ if (on) {
+ dev_dbg(phy->dev, "gadget on\n");
+ /*
+ * Some boards have a switch cotrolled by gpio
+ * to enable/disable internal HUB. Disable internal
+ * HUB before kicking the gadget.
+ */
+ if (pdata->setup_gpio)
+ pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
+ usb_gadget_vbus_connect(phy->otg->gadget);
+ } else {
+ dev_dbg(phy->dev, "gadget off\n");
+ usb_gadget_vbus_disconnect(phy->otg->gadget);
+ if (pdata->setup_gpio)
+ pdata->setup_gpio(OTG_STATE_UNDEFINED);
+ }
+
+}
+
+/*
+ * usb_otg set_peripheral hook, symmetric with msm_otg_set_host().
+ * Returns -ENODEV on host-only boards, else 0.
+ */
+static int msm_otg_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+
+ /*
+ * Fail peripheral registration if this board can support
+ * only host configuration.
+ */
+ if (motg->pdata->mode == USB_DR_MODE_HOST) {
+ dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
+ return -ENODEV;
+ }
+
+ if (!gadget) {
+ if (otg->state == OTG_STATE_B_PERIPHERAL) {
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_start_peripheral(otg->usb_phy, 0);
+ otg->gadget = NULL;
+ otg->state = OTG_STATE_UNDEFINED;
+ schedule_work(&motg->sm_work);
+ } else {
+ otg->gadget = NULL;
+ }
+
+ return 0;
+ }
+ otg->gadget = gadget;
+ dev_dbg(otg->usb_phy->dev,
+ "peripheral driver registered w/ tranceiver\n");
+
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
+
+ return 0;
+}
+
+/*
+ * Read the secondary-detection comparator output (distinguishes DCP
+ * from CDP). Register/bit position depends on the PHY generation;
+ * unknown PHY types report false.
+ */
+static bool msm_chg_check_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ ret = chg_det & (1 << 4);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x87);
+ ret = chg_det & 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Arm secondary detection (DCP vs CDP): on the SNPS PHY, source current
+ * on D-, sink on D+, enable comparators. On the CI 45nm PHY the charger
+ * block is power-cycled first.
+ *
+ * Fix: "Turn off charger block" must SET the power-down bit (bit 1);
+ * the original `chg_det |= ~(1 << 1)` set every bit EXCEPT bit 1,
+ * contradicting both the comment and the symmetric turn-on path below
+ * (`chg_det &= ~(1 << 1)`).
+ */
+static void msm_chg_enable_secondary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ /* Turn off charger block */
+ chg_det |= (1 << 1);
+ ulpi_write(phy, chg_det, 0x34);
+ udelay(20);
+ /* control chg block via ULPI */
+ chg_det &= ~(1 << 3);
+ ulpi_write(phy, chg_det, 0x34);
+ /* put it in host mode for enabling D- source */
+ chg_det &= ~(1 << 2);
+ ulpi_write(phy, chg_det, 0x34);
+ /* Turn on chg detect block */
+ chg_det &= ~(1 << 1);
+ ulpi_write(phy, chg_det, 0x34);
+ udelay(20);
+ /* enable chg detection */
+ chg_det &= ~(1 << 0);
+ ulpi_write(phy, chg_det, 0x34);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /*
+ * Configure DM as current source, DP as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x8, 0x85);
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Read the primary-detection comparator output (SDP vs charger port).
+ * Same register layout as the secondary check; unknown PHYs → false.
+ */
+static bool msm_chg_check_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ ret = chg_det & (1 << 4);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x87);
+ ret = chg_det & 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Arm primary detection: on the SNPS PHY, source current on D+, sink
+ * on D-, and enable the battery-charging comparators.
+ */
+static void msm_chg_enable_primary_det(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ /* enable chg detection */
+ chg_det &= ~(1 << 0);
+ ulpi_write(phy, chg_det, 0x34);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /*
+ * Configure DP as current source, DM as current sink
+ * and enable battery charging comparators.
+ */
+ ulpi_write(phy, 0x2, 0x85);
+ ulpi_write(phy, 0x1, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Data-contact-detect poll: true once the data lines indicate a
+ * contacted partner (line-state register, PHY-generation specific).
+ */
+static bool msm_chg_check_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 line_state;
+ bool ret = false;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ line_state = ulpi_read(phy, 0x15);
+ ret = !(line_state & 1);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ line_state = ulpi_read(phy, 0x87);
+ ret = line_state & 2;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/* Turn off the data-contact-detect current source. */
+static void msm_chg_disable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ chg_det &= ~(1 << 5);
+ ulpi_write(phy, chg_det, 0x34);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /* 0x86 is the "clear" shadow of control register 0x85 */
+ ulpi_write(phy, 0x10, 0x86);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Turn on the data-contact-detect D+ current source. */
+static void msm_chg_enable_dcd(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 chg_det;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ /* Turn on D+ current source */
+ chg_det |= (1 << 5);
+ ulpi_write(phy, chg_det, 0x34);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /* Data contact detection enable */
+ ulpi_write(phy, 0x10, 0x85);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Prepare the PHY charger-detection block: put the transceiver in
+ * non-driving opmode so detection currents are not disturbed, then
+ * power up / clear the detection machinery per PHY generation.
+ */
+static void msm_chg_block_on(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl, chg_det;
+
+ /* put the controller in non-driving mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ /* control chg block via ULPI */
+ chg_det &= ~(1 << 3);
+ ulpi_write(phy, chg_det, 0x34);
+ /* Turn on chg detect block */
+ chg_det &= ~(1 << 1);
+ ulpi_write(phy, chg_det, 0x34);
+ udelay(20);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x3F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ udelay(100);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Shut down the charger-detection block and return the transceiver to
+ * normal opmode.
+ *
+ * Fix: as in msm_chg_enable_secondary_det(), "Turn off charger block"
+ * must SET the power-down bit (bit 1); the original
+ * `chg_det |= ~(1 << 1)` set every bit except bit 1.
+ */
+static void msm_chg_block_off(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ u32 func_ctrl, chg_det;
+
+ switch (motg->pdata->phy_type) {
+ case CI_45NM_INTEGRATED_PHY:
+ chg_det = ulpi_read(phy, 0x34);
+ /* Turn off charger block */
+ chg_det |= (1 << 1);
+ ulpi_write(phy, chg_det, 0x34);
+ break;
+ case SNPS_28NM_INTEGRATED_PHY:
+ /* Clear charger detecting control bits */
+ ulpi_write(phy, 0x3F, 0x86);
+ /* Clear alt interrupt latch and enable bits */
+ ulpi_write(phy, 0x1F, 0x92);
+ ulpi_write(phy, 0x1F, 0x95);
+ break;
+ default:
+ break;
+ }
+
+ /* put the controller in normal mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+}
+
+#define MSM_CHG_DCD_POLL_TIME (100 * HZ/1000) /* 100 msec */
+#define MSM_CHG_DCD_MAX_RETRIES 6 /* Tdcd_tmout = 6 * 100 msec */
+#define MSM_CHG_PRIMARY_DET_TIME (40 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME (40 * HZ/1000) /* TVDMSRC_ON */
+/*
+ * BC-1.x style charger detection state machine, run as delayed work:
+ * UNDEFINED -> WAIT_FOR_DCD -> DCD_DONE -> [PRIMARY_DONE ->
+ * SECONDARY_DONE] -> DETECTED. Takes a pm_runtime reference on entry
+ * (UNDEFINED); the reference is released by the sm_work path.
+ *
+ * Fix: annotate the SECONDARY_DONE -> DETECTED fallthrough, which was
+ * unmarked (unlike the PRIMARY_DONE one) and trips
+ * -Wimplicit-fallthrough.
+ */
+static void msm_chg_detect_work(struct work_struct *w)
+{
+ struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
+ struct usb_phy *phy = &motg->phy;
+ bool is_dcd, tmout, vout;
+ unsigned long delay;
+
+ dev_dbg(phy->dev, "chg detection work\n");
+ switch (motg->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ pm_runtime_get_sync(phy->dev);
+ msm_chg_block_on(motg);
+ msm_chg_enable_dcd(motg);
+ motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+ motg->dcd_retries = 0;
+ delay = MSM_CHG_DCD_POLL_TIME;
+ break;
+ case USB_CHG_STATE_WAIT_FOR_DCD:
+ is_dcd = msm_chg_check_dcd(motg);
+ tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
+ if (is_dcd || tmout) {
+ msm_chg_disable_dcd(motg);
+ msm_chg_enable_primary_det(motg);
+ delay = MSM_CHG_PRIMARY_DET_TIME;
+ motg->chg_state = USB_CHG_STATE_DCD_DONE;
+ } else {
+ delay = MSM_CHG_DCD_POLL_TIME;
+ }
+ break;
+ case USB_CHG_STATE_DCD_DONE:
+ vout = msm_chg_check_primary_det(motg);
+ if (vout) {
+ msm_chg_enable_secondary_det(motg);
+ delay = MSM_CHG_SECONDARY_DET_TIME;
+ motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+ } else {
+ motg->chg_type = USB_SDP_CHARGER;
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ }
+ break;
+ case USB_CHG_STATE_PRIMARY_DONE:
+ vout = msm_chg_check_secondary_det(motg);
+ if (vout)
+ motg->chg_type = USB_DCP_CHARGER;
+ else
+ motg->chg_type = USB_CDP_CHARGER;
+ motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
+ /* fall through */
+ case USB_CHG_STATE_SECONDARY_DONE:
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ /* fall through */
+ case USB_CHG_STATE_DETECTED:
+ msm_chg_block_off(motg);
+ dev_dbg(phy->dev, "charger = %d\n", motg->chg_type);
+ schedule_work(&motg->sm_work);
+ return;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&motg->chg_work, delay);
+}
+
+/*
+ * We support OTG, Peripheral only and Host only configurations. In case
+ * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
+ * via Id pin status or user request (debugfs). Id/BSV interrupts are not
+ * enabled when switch is controlled by user and default mode is supplied
+ * by board file, which can be changed by userspace later.
+ */
+/*
+ * Seed the ID and B_SESS_VLD input bits from the configured dr_mode:
+ * OTG reads OTGSC when PHY-controlled (or defaults to device side for
+ * user control); host-only forces ID low; peripheral-only forces ID
+ * high and samples BSV.
+ */
+static void msm_otg_init_sm(struct msm_otg *motg)
+{
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ u32 otgsc = readl(USB_OTGSC);
+
+ switch (pdata->mode) {
+ case USB_DR_MODE_OTG:
+ if (pdata->otg_control == OTG_PHY_CONTROL) {
+ if (otgsc & OTGSC_ID)
+ set_bit(ID, &motg->inputs);
+ else
+ clear_bit(ID, &motg->inputs);
+
+ if (otgsc & OTGSC_BSV)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ } else if (pdata->otg_control == OTG_USER_CONTROL) {
+ set_bit(ID, &motg->inputs);
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ }
+ break;
+ case USB_DR_MODE_HOST:
+ clear_bit(ID, &motg->inputs);
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ set_bit(ID, &motg->inputs);
+ if (otgsc & OTGSC_BSV)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * OTG state machine worker. Transitions on the ID / B_SESS_VLD input
+ * bits: UNDEFINED initializes and falls into B_IDLE; B_IDLE either
+ * starts host mode (ID low + host registered), runs/acts on charger
+ * detection (BSV high), or idles and drops the runtime-PM reference;
+ * B_PERIPHERAL and A_HOST tear down and reschedule when their inputs
+ * go away.
+ */
+static void msm_otg_sm_work(struct work_struct *w)
+{
+ struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+ struct usb_otg *otg = motg->phy.otg;
+
+ switch (otg->state) {
+ case OTG_STATE_UNDEFINED:
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_UNDEFINED state\n");
+ msm_otg_reset(otg->usb_phy);
+ msm_otg_init_sm(motg);
+ otg->state = OTG_STATE_B_IDLE;
+ /* FALL THROUGH */
+ case OTG_STATE_B_IDLE:
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_IDLE state\n");
+ if (!test_bit(ID, &motg->inputs) && otg->host) {
+ /* disable BSV bit */
+ writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+ msm_otg_start_host(otg->usb_phy, 1);
+ otg->state = OTG_STATE_A_HOST;
+ } else if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ switch (motg->chg_state) {
+ case USB_CHG_STATE_UNDEFINED:
+ /* kick charger detection synchronously */
+ msm_chg_detect_work(&motg->chg_work.work);
+ break;
+ case USB_CHG_STATE_DETECTED:
+ switch (motg->chg_type) {
+ case USB_DCP_CHARGER:
+ msm_otg_notify_charger(motg,
+ IDEV_CHG_MAX);
+ break;
+ case USB_CDP_CHARGER:
+ msm_otg_notify_charger(motg,
+ IDEV_CHG_MAX);
+ msm_otg_start_peripheral(otg->usb_phy,
+ 1);
+ otg->state
+ = OTG_STATE_B_PERIPHERAL;
+ break;
+ case USB_SDP_CHARGER:
+ msm_otg_notify_charger(motg, IUNIT);
+ msm_otg_start_peripheral(otg->usb_phy,
+ 1);
+ otg->state
+ = OTG_STATE_B_PERIPHERAL;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ /*
+ * If charger detection work is pending, decrement
+ * the pm usage counter to balance with the one that
+ * is incremented in charger detection work.
+ */
+ if (cancel_delayed_work_sync(&motg->chg_work)) {
+ pm_runtime_put_sync(otg->usb_phy->dev);
+ msm_otg_reset(otg->usb_phy);
+ }
+ msm_otg_notify_charger(motg, 0);
+ motg->chg_state = USB_CHG_STATE_UNDEFINED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ }
+
+ if (otg->state == OTG_STATE_B_IDLE)
+ pm_runtime_put_sync(otg->usb_phy->dev);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
+ if (!test_bit(B_SESS_VLD, &motg->inputs) ||
+ !test_bit(ID, &motg->inputs)) {
+ msm_otg_notify_charger(motg, 0);
+ msm_otg_start_peripheral(otg->usb_phy, 0);
+ motg->chg_state = USB_CHG_STATE_UNDEFINED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ otg->state = OTG_STATE_B_IDLE;
+ msm_otg_reset(otg->usb_phy);
+ schedule_work(w);
+ }
+ break;
+ case OTG_STATE_A_HOST:
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_A_HOST state\n");
+ if (test_bit(ID, &motg->inputs)) {
+ msm_otg_start_host(otg->usb_phy, 0);
+ otg->state = OTG_STATE_B_IDLE;
+ msm_otg_reset(otg->usb_phy);
+ schedule_work(w);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Controller interrupt handler. In LPM the register block is clock-
+ * gated, so the IRQ is masked, async_int is flagged, and a runtime-PM
+ * get triggers resume (which re-enables the IRQ). Otherwise ID/BSV
+ * status changes update the input bits, the W1C status bits are acked,
+ * and the state machine is scheduled.
+ */
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+ struct msm_otg *motg = data;
+ struct usb_phy *phy = &motg->phy;
+ u32 otgsc = 0;
+
+ if (atomic_read(&motg->in_lpm)) {
+ disable_irq_nosync(irq);
+ motg->async_int = 1;
+ pm_runtime_get(phy->dev);
+ return IRQ_HANDLED;
+ }
+
+ otgsc = readl(USB_OTGSC);
+ if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
+ return IRQ_NONE;
+
+ if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
+ if (otgsc & OTGSC_ID)
+ set_bit(ID, &motg->inputs);
+ else
+ clear_bit(ID, &motg->inputs);
+ dev_dbg(phy->dev, "ID set/clear\n");
+ pm_runtime_get_noresume(phy->dev);
+ } else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
+ if (otgsc & OTGSC_BSV)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ dev_dbg(phy->dev, "BSV set/clear\n");
+ pm_runtime_get_noresume(phy->dev);
+ }
+
+ /* write-1-to-clear the interrupt status bits just handled */
+ writel(otgsc, USB_OTGSC);
+ schedule_work(&motg->sm_work);
+ return IRQ_HANDLED;
+}
+
+/* debugfs "mode" read: print the current OTG role as a word. */
+static int msm_otg_mode_show(struct seq_file *s, void *unused)
+{
+ struct msm_otg *motg = s->private;
+ struct usb_otg *otg = motg->phy.otg;
+
+ switch (otg->state) {
+ case OTG_STATE_A_HOST:
+ seq_puts(s, "host\n");
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ seq_puts(s, "peripheral\n");
+ break;
+ default:
+ seq_puts(s, "none\n");
+ break;
+ }
+
+ return 0;
+}
+
+/* debugfs open: bind msm_otg_mode_show to this file's seq_file. */
+static int msm_otg_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_otg_mode_show, inode->i_private);
+}
+
+/*
+ * debugfs "mode" write: accept "host" / "peripheral" / "none", fake the
+ * corresponding ID / B_SESS_VLD inputs (only for transitions that make
+ * sense from the current state) and kick the state machine. Writes
+ * requesting the current state fall out via `goto out` and still
+ * report success (count).
+ */
+static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct msm_otg *motg = s->private;
+ char buf[16];
+ struct usb_otg *otg = motg->phy.otg;
+ int status = count;
+ enum usb_dr_mode req_mode;
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
+ status = -EFAULT;
+ goto out;
+ }
+
+ if (!strncmp(buf, "host", 4)) {
+ req_mode = USB_DR_MODE_HOST;
+ } else if (!strncmp(buf, "peripheral", 10)) {
+ req_mode = USB_DR_MODE_PERIPHERAL;
+ } else if (!strncmp(buf, "none", 4)) {
+ req_mode = USB_DR_MODE_UNKNOWN;
+ } else {
+ status = -EINVAL;
+ goto out;
+ }
+
+ switch (req_mode) {
+ case USB_DR_MODE_UNKNOWN:
+ switch (otg->state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_B_PERIPHERAL:
+ set_bit(ID, &motg->inputs);
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ switch (otg->state) {
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_A_HOST:
+ set_bit(ID, &motg->inputs);
+ set_bit(B_SESS_VLD, &motg->inputs);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case USB_DR_MODE_HOST:
+ switch (otg->state) {
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_B_PERIPHERAL:
+ clear_bit(ID, &motg->inputs);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ default:
+ goto out;
+ }
+
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
+out:
+ return status;
+}
+
+/* File operations for the debugfs "mode" control file. */
+static const struct file_operations msm_otg_mode_fops = {
+ .open = msm_otg_mode_open,
+ .read = seq_read,
+ .write = msm_otg_mode_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* debugfs handles: <debugfs>/msm_otg directory and its "mode" file. */
+static struct dentry *msm_otg_dbg_root;
+static struct dentry *msm_otg_dbg_mode;
+
+/*
+ * Create <debugfs>/msm_otg/mode; on failure the directory is removed
+ * again and -ENODEV is returned.
+ */
+static int msm_otg_debugfs_init(struct msm_otg *motg)
+{
+ msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
+
+ if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
+ return -ENODEV;
+
+ msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
+ msm_otg_dbg_root, motg, &msm_otg_mode_fops);
+ if (!msm_otg_dbg_mode) {
+ debugfs_remove(msm_otg_dbg_root);
+ msm_otg_dbg_root = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void msm_otg_debugfs_cleanup(void)
+{
+ debugfs_remove(msm_otg_dbg_mode);
+ debugfs_remove(msm_otg_dbg_root);
+}
+
+static const struct of_device_id msm_otg_dt_match[] = {
+ {
+ .compatible = "qcom,usb-otg-ci",
+ .data = (void *) CI_45NM_INTEGRATED_PHY
+ },
+ {
+ .compatible = "qcom,usb-otg-snps",
+ .data = (void *) SNPS_28NM_INTEGRATED_PHY
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_otg_dt_match);
+
+static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct msm_usb_cable *vbus = container_of(nb, struct msm_usb_cable, nb);
+ struct msm_otg *motg = container_of(vbus, struct msm_otg, vbus);
+
+ if (event)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ /* Switch D+/D- lines to Device connector */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ } else {
+ /* Switch D+/D- lines to Hub */
+ gpiod_set_value_cansleep(motg->switch_gpio, 1);
+ }
+
+ schedule_work(&motg->sm_work);
+
+ return NOTIFY_DONE;
+}
+
+static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct msm_usb_cable *id = container_of(nb, struct msm_usb_cable, nb);
+ struct msm_otg *motg = container_of(id, struct msm_otg, id);
+
+ if (event)
+ clear_bit(ID, &motg->inputs);
+ else
+ set_bit(ID, &motg->inputs);
+
+ schedule_work(&motg->sm_work);
+
+ return NOTIFY_DONE;
+}
+
+static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
+{
+ struct msm_otg_platform_data *pdata;
+ struct extcon_dev *ext_id, *ext_vbus;
+ struct device_node *node = pdev->dev.of_node;
+ struct property *prop;
+ int len, ret, words;
+ u32 val, tmp[3];
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ motg->pdata = pdata;
+
+ pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
+ if (!pdata->phy_type)
+ return 1;
+
+ motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
+ if (IS_ERR(motg->link_rst))
+ return PTR_ERR(motg->link_rst);
+
+ motg->phy_rst = devm_reset_control_get(&pdev->dev, "phy");
+ if (IS_ERR(motg->phy_rst))
+ motg->phy_rst = NULL;
+
+ pdata->mode = usb_get_dr_mode(&pdev->dev);
+ if (pdata->mode == USB_DR_MODE_UNKNOWN)
+ pdata->mode = USB_DR_MODE_OTG;
+
+ pdata->otg_control = OTG_PHY_CONTROL;
+ if (!of_property_read_u32(node, "qcom,otg-control", &val))
+ if (val == OTG_PMIC_CONTROL)
+ pdata->otg_control = val;
+
+ if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2)
+ motg->phy_number = val;
+
+ motg->vdd_levels[VDD_LEVEL_NONE] = USB_PHY_SUSP_DIG_VOL;
+ motg->vdd_levels[VDD_LEVEL_MIN] = USB_PHY_VDD_DIG_VOL_MIN;
+ motg->vdd_levels[VDD_LEVEL_MAX] = USB_PHY_VDD_DIG_VOL_MAX;
+
+ if (of_get_property(node, "qcom,vdd-levels", &len) &&
+ len == sizeof(tmp)) {
+ of_property_read_u32_array(node, "qcom,vdd-levels",
+ tmp, len / sizeof(*tmp));
+ motg->vdd_levels[VDD_LEVEL_NONE] = tmp[VDD_LEVEL_NONE];
+ motg->vdd_levels[VDD_LEVEL_MIN] = tmp[VDD_LEVEL_MIN];
+ motg->vdd_levels[VDD_LEVEL_MAX] = tmp[VDD_LEVEL_MAX];
+ }
+
+ motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
+
+ motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(motg->switch_gpio))
+ return PTR_ERR(motg->switch_gpio);
+
+ ext_id = ERR_PTR(-ENODEV);
+ ext_vbus = ERR_PTR(-ENODEV);
+ if (of_property_read_bool(node, "extcon")) {
+
+ /* Each one of them is not mandatory */
+ ext_vbus = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
+ return PTR_ERR(ext_vbus);
+
+ ext_id = extcon_get_edev_by_phandle(&pdev->dev, 1);
+ if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
+ return PTR_ERR(ext_id);
+ }
+
+ if (!IS_ERR(ext_vbus)) {
+ motg->vbus.extcon = ext_vbus;
+ motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
+ ret = extcon_register_notifier(ext_vbus, EXTCON_USB,
+ &motg->vbus.nb);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "register VBUS notifier failed\n");
+ return ret;
+ }
+
+ ret = extcon_get_cable_state_(ext_vbus, EXTCON_USB);
+ if (ret)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ }
+
+ if (!IS_ERR(ext_id)) {
+ motg->id.extcon = ext_id;
+ motg->id.nb.notifier_call = msm_otg_id_notifier;
+ ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
+ &motg->id.nb);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "register ID notifier failed\n");
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
+ return ret;
+ }
+
+ ret = extcon_get_cable_state_(ext_id, EXTCON_USB_HOST);
+ if (ret)
+ clear_bit(ID, &motg->inputs);
+ else
+ set_bit(ID, &motg->inputs);
+ }
+
+ prop = of_find_property(node, "qcom,phy-init-sequence", &len);
+ if (!prop || !len)
+ return 0;
+
+ words = len / sizeof(u32);
+
+ if (words >= ULPI_EXT_VENDOR_SPECIFIC) {
+ dev_warn(&pdev->dev, "Too big PHY init sequence %d\n", words);
+ return 0;
+ }
+
+ pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!pdata->phy_init_seq)
+ return 0;
+
+ ret = of_property_read_u32_array(node, "qcom,phy-init-sequence",
+ pdata->phy_init_seq, words);
+ if (!ret)
+ pdata->phy_init_sz = words;
+
+ return 0;
+}
+
+static int msm_otg_reboot_notify(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
+
+ /*
+ * Ensure that D+/D- lines are routed to uB connector, so
+ * we could load bootloader/kernel at next reboot
+ */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ return NOTIFY_DONE;
+}
+
+static int msm_otg_probe(struct platform_device *pdev)
+{
+ struct regulator_bulk_data regs[3];
+ int ret = 0;
+ struct device_node *np = pdev->dev.of_node;
+ struct msm_otg_platform_data *pdata;
+ struct resource *res;
+ struct msm_otg *motg;
+ struct usb_phy *phy;
+ void __iomem *phy_select;
+
+ motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL);
+ if (!motg)
+ return -ENOMEM;
+
+ motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!motg->phy.otg)
+ return -ENOMEM;
+
+ phy = &motg->phy;
+ phy->dev = &pdev->dev;
+
+ motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
+ if (IS_ERR(motg->clk)) {
+ dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
+ return PTR_ERR(motg->clk);
+ }
+
+ /*
+ * If USB Core is running its protocol engine based on CORE CLK,
+ * CORE CLK must be running at >55 MHz for correct HSUSB
+ * operation and USB core cannot tolerate frequency changes on
+ * CORE CLK.
+ */
+ motg->pclk = devm_clk_get(&pdev->dev, np ? "iface" : "usb_hs_pclk");
+ if (IS_ERR(motg->pclk)) {
+ dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
+ return PTR_ERR(motg->pclk);
+ }
+
+ /*
+ * USB core clock is not present on all MSM chips. This
+ * clock is introduced to remove the dependency on AXI
+ * bus frequency.
+ */
+ motg->core_clk = devm_clk_get(&pdev->dev,
+ np ? "alt_core" : "usb_hs_core_clk");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ motg->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!motg->regs)
+ return -ENOMEM;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * NOTE: The PHYs can be multiplexed between the chipidea controller
+ * and the dwc3 controller, using a single bit. It is important that
+ * the dwc3 driver does not set this bit in an incompatible way.
+ */
+ if (motg->phy_number) {
+ phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
+ if (!phy_select) {
+ ret = -ENOMEM;
+ goto unregister_extcon;
+ }
+ /* Enable second PHY with the OTG port */
+ writel(0x1, phy_select);
+ }
+
+ dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
+
+ motg->irq = platform_get_irq(pdev, 0);
+ if (motg->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ ret = motg->irq;
+ goto unregister_extcon;
+ }
+
+ regs[0].supply = "vddcx";
+ regs[1].supply = "v3p3";
+ regs[2].supply = "v1p8";
+
+ ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+ if (ret)
+ goto unregister_extcon;
+
+ motg->vddcx = regs[0].consumer;
+ motg->v3p3 = regs[1].consumer;
+ motg->v1p8 = regs[2].consumer;
+
+ clk_set_rate(motg->clk, 60000000);
+
+ clk_prepare_enable(motg->clk);
+ clk_prepare_enable(motg->pclk);
+
+ if (!IS_ERR(motg->core_clk))
+ clk_prepare_enable(motg->core_clk);
+
+ ret = msm_hsusb_init_vddcx(motg, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+ goto disable_clks;
+ }
+
+ ret = msm_hsusb_ldo_init(motg, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
+ goto disable_vddcx;
+ }
+ ret = msm_hsusb_ldo_set_mode(motg, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "hsusb vreg enable failed\n");
+ goto disable_ldo;
+ }
+
+ writel(0, USB_USBINTR);
+ writel(0, USB_OTGSC);
+
+ INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+ INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
+ ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
+ "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ goto disable_ldo;
+ }
+
+ phy->init = msm_phy_init;
+ phy->set_power = msm_otg_set_power;
+ phy->notify_disconnect = msm_phy_notify_disconnect;
+ phy->type = USB_PHY_TYPE_USB2;
+
+ phy->io_ops = &msm_otg_io_ops;
+
+ phy->otg->usb_phy = &motg->phy;
+ phy->otg->set_host = msm_otg_set_host;
+ phy->otg->set_peripheral = msm_otg_set_peripheral;
+
+ msm_usb_reset(phy);
+
+ ret = usb_add_phy_dev(&motg->phy);
+ if (ret) {
+ dev_err(&pdev->dev, "usb_add_phy failed\n");
+ goto disable_ldo;
+ }
+
+ platform_set_drvdata(pdev, motg);
+ device_init_wakeup(&pdev->dev, 1);
+
+ if (motg->pdata->mode == USB_DR_MODE_OTG &&
+ motg->pdata->otg_control == OTG_USER_CONTROL) {
+ ret = msm_otg_debugfs_init(motg);
+ if (ret)
+ dev_dbg(&pdev->dev, "Can not create mode change file\n");
+ }
+
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ /* Switch D+/D- lines to Device connector */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ } else {
+ /* Switch D+/D- lines to Hub */
+ gpiod_set_value_cansleep(motg->switch_gpio, 1);
+ }
+
+ motg->reboot.notifier_call = msm_otg_reboot_notify;
+ register_reboot_notifier(&motg->reboot);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+disable_ldo:
+ msm_hsusb_ldo_init(motg, 0);
+disable_vddcx:
+ msm_hsusb_init_vddcx(motg, 0);
+disable_clks:
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->clk);
+ if (!IS_ERR(motg->core_clk))
+ clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
+
+ return ret;
+}
+
+static int msm_otg_remove(struct platform_device *pdev)
+{
+ struct msm_otg *motg = platform_get_drvdata(pdev);
+ struct usb_phy *phy = &motg->phy;
+ int cnt = 0;
+
+ if (phy->otg->host || phy->otg->gadget)
+ return -EBUSY;
+
+ unregister_reboot_notifier(&motg->reboot);
+
+ /*
+ * Ensure that D+/D- lines are routed to uB connector, so
+ * we could load bootloader/kernel at next reboot
+ */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+
+ extcon_unregister_notifier(motg->id.extcon, EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon, EXTCON_USB, &motg->vbus.nb);
+
+ msm_otg_debugfs_cleanup();
+ cancel_delayed_work_sync(&motg->chg_work);
+ cancel_work_sync(&motg->sm_work);
+
+ pm_runtime_resume(&pdev->dev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
+
+ usb_remove_phy(phy);
+ disable_irq(motg->irq);
+
+ /*
+ * Put PHY in low power mode.
+ */
+ ulpi_read(phy, 0x14);
+ ulpi_write(phy, 0x08, 0x09);
+
+ writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+ while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
+ if (readl(USB_PORTSC) & PORTSC_PHCD)
+ break;
+ udelay(1);
+ cnt++;
+ }
+ if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
+ dev_err(phy->dev, "Unable to suspend PHY\n");
+
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->clk);
+ if (!IS_ERR(motg->core_clk))
+ clk_disable_unprepare(motg->core_clk);
+ msm_hsusb_ldo_init(motg, 0);
+
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_otg_runtime_idle(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+ struct usb_otg *otg = motg->phy.otg;
+
+ dev_dbg(dev, "OTG runtime idle\n");
+
+ /*
+ * It is observed some times that a spurious interrupt
+ * comes when PHY is put into LPM immediately after PHY reset.
+ * This 1 sec delay also prevents entering into LPM immediately
+ * after asynchronous interrupt.
+ */
+ if (otg->state != OTG_STATE_UNDEFINED)
+ pm_schedule_suspend(dev, 1000);
+
+ return -EAGAIN;
+}
+
+static int msm_otg_runtime_suspend(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG runtime suspend\n");
+ return msm_otg_suspend(motg);
+}
+
+static int msm_otg_runtime_resume(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG runtime resume\n");
+ return msm_otg_resume(motg);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_otg_pm_suspend(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "OTG PM suspend\n");
+ return msm_otg_suspend(motg);
+}
+
+static int msm_otg_pm_resume(struct device *dev)
+{
+ struct msm_otg *motg = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(dev, "OTG PM resume\n");
+
+ ret = msm_otg_resume(motg);
+ if (ret)
+ return ret;
+
+ /*
+ * Runtime PM Documentation recommends bringing the
+ * device to full powered state upon resume.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_otg_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
+ msm_otg_runtime_idle)
+};
+
+static struct platform_driver msm_otg_driver = {
+ .probe = msm_otg_probe,
+ .remove = msm_otg_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &msm_otg_dev_pm_ops,
+ .of_match_table = msm_otg_dt_match,
+ },
+};
+
+module_platform_driver(msm_otg_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8a34759..6170656 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,17 +9,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
*/
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/extcon.h>
-#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
@@ -28,25 +21,65 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/reboot.h>
+#include <linux/dma-mapping.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include <linux/pm_wakeup.h>
#include <linux/reset.h>
-#include <linux/types.h>
-#include <linux/usb/otg.h>
+#include <linux/extcon.h>
+#include <soc/qcom/scm.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
-#include <linux/usb/of.h>
#include <linux/usb/ulpi.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/msm_hsusb.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#include <linux/msm-bus.h>
+
+/**
+ * Requested USB votes for BUS bandwidth
+ *
+ * USB_NO_PERF_VOTE BUS Vote for inactive USB session or disconnect
+ * USB_MAX_PERF_VOTE Maximum BUS bandwidth vote
+ * USB_MIN_PERF_VOTE Minimum BUS bandwidth vote (for some hw same as NO_PERF)
+ *
+ */
+enum usb_bus_vote {
+ USB_NO_PERF_VOTE = 0,
+ USB_MAX_PERF_VOTE,
+ USB_MIN_PERF_VOTE,
+};
+
+/**
+ * Supported USB modes
+ *
+ * USB_PERIPHERAL Only peripheral mode is supported.
+ * USB_HOST Only host mode is supported.
+ * USB_OTG OTG mode is supported.
+ *
+ */
+enum usb_mode_type {
+ USB_NONE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_OTG,
+};
/**
* OTG control
@@ -69,165 +102,177 @@
* PHY used in
*
* INVALID_PHY Unsupported PHY
- * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
- * SNPS_28NM_INTEGRATED_PHY Synopsis 28nm integrated PHY
+ * CI_PHY Chipidea PHY
+ * SNPS_PICO_PHY Synopsys Pico PHY
+ * SNPS_FEMTO_PHY Synopsys Femto PHY
+ * QUSB_ULPI_PHY
*
*/
enum msm_usb_phy_type {
INVALID_PHY = 0,
- CI_45NM_INTEGRATED_PHY,
- SNPS_28NM_INTEGRATED_PHY,
+ CI_PHY, /* not supported */
+ SNPS_PICO_PHY,
+ SNPS_FEMTO_PHY,
+ QUSB_ULPI_PHY,
};
#define IDEV_CHG_MAX 1500
#define IUNIT 100
+#define IDEV_HVDCP_CHG_MAX 1800
/**
- * Different states involved in USB charger detection.
- *
- * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
- * process is not yet started.
- * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
- * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
- * between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
- * between DCP and CDP).
- * USB_CHG_STATE_DETECTED USB charger type is determined.
- *
+ * Used different VDDCX voltage values
*/
-enum usb_chg_state {
- USB_CHG_STATE_UNDEFINED = 0,
- USB_CHG_STATE_WAIT_FOR_DCD,
- USB_CHG_STATE_DCD_DONE,
- USB_CHG_STATE_PRIMARY_DONE,
- USB_CHG_STATE_SECONDARY_DONE,
- USB_CHG_STATE_DETECTED,
-};
-
-/**
- * USB charger types
- *
- * USB_INVALID_CHARGER Invalid USB charger.
- * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
- * on USB2.0 compliant host/hub.
- * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
- * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
- * IDEV_CHG_MAX can be drawn irrespective of USB state.
- *
- */
-enum usb_chg_type {
- USB_INVALID_CHARGER = 0,
- USB_SDP_CHARGER,
- USB_DCP_CHARGER,
- USB_CDP_CHARGER,
+enum usb_vdd_value {
+ VDD_NONE = 0,
+ VDD_MIN,
+ VDD_MAX,
+ VDD_VAL_MAX,
};
/**
* struct msm_otg_platform_data - platform device data
* for msm_otg driver.
* @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
- * "do not overwrite default vaule at this address".
- * @phy_init_sz: PHY configuration sequence size.
- * @vbus_power: VBUS power on/off routine.
+ * "do not overwrite default value at this address".
+ * @vbus_power: VBUS power on/off routine. It should return result
+ * as success(zero value) or failure(non-zero value).
* @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
* @mode: Supported mode (OTG/peripheral/host).
* @otg_control: OTG switch controlled by user/Id pin
+ * @default_mode: Default operational mode. Applicable only if
+ * OTG switch is controller by user.
+ * @pmic_id_irq: IRQ number assigned for PMIC USB ID line.
+ * @mpm_otgsessvld_int: MPM wakeup pin assigned for OTG SESSVLD
+ * interrupt. Used when .otg_control == OTG_PHY_CONTROL.
+ * @mpm_dpshv_int: MPM wakeup pin assigned for DP SHV interrupt.
+ * Used during host bus suspend.
+ * @mpm_dmshv_int: MPM wakeup pin assigned for DM SHV interrupt.
+ * Used during host bus suspend.
+ * @disable_reset_on_disconnect: perform USB PHY and LINK reset
+ * on USB cable disconnection.
+ * @pnoc_errata_fix: workaround needed for PNOC hardware bug that
+ * affects USB performance.
+ * @enable_lpm_on_suspend: Enable the USB core to go into Low
+ * Power Mode, when USB bus is suspended but cable
+ * is connected.
+ * @core_clk_always_on_workaround: Don't disable core_clk when
+ * USB enters LPM.
+ * @delay_lpm_on_disconnect: Use a delay before entering LPM
+ * upon USB cable disconnection.
+ * @enable_sec_phy: Use second HSPHY with USB2 core
+ * @bus_scale_table: parameters for bus bandwidth requirements
+ * @log2_itc: value of 2^(log2_itc-1) will be used as the
+ * interrupt threshold (ITC), when log2_itc is
+ * between 1 to 7.
+ * @l1_supported: enable link power management support.
+ * @dpdm_pulldown_added: Indicates whether pull down resistors are
+ * connected on data lines or not.
+ * @vddmin_gpio: dedicated gpio in the platform that is used for
+ * pullup the D+ line in case of bus suspend with
+ * phy retention.
+ * @enable_ahb2ahb_bypass: Indicates whether enable AHB2AHB BYPASS
+ * mode with controller in device mode.
+ * @bool disable_retention_with_vdd_min: Indicates whether to enable
+ allowing VDDmin without putting PHY into retention.
+ * @bool enable_phy_id_pullup: Indicates whether phy id pullup is
+ enabled or not.
+ * @usb_id_gpio: Gpio used for USB ID detection.
+ * @hub_reset_gpio: Gpio used for hub reset.
+ * @switch_sel_gpio: Gpio used for controlling switch that
+ routing D+/D- from the USB HUB to the USB jack type B
+ for peripheral mode.
+ * @bool phy_dvdd_always_on: PHY DVDD is supplied by always on PMIC LDO.
+ * @bool emulation: Indicates whether we are running on emulation platform.
+ * @bool enable_streaming: Indicates whether streaming to be enabled by default.
+ * @bool enable_axi_prefetch: Indicates whether AXI Prefetch interface is used
+ for improving data performance.
+ * @bool enable_sdp_typec_current_limit: Indicates whether type-c current for
+ sdp charger to be limited.
+ * @usbeth_reset_gpio: Gpio used for external usb-to-eth reset.
*/
struct msm_otg_platform_data {
int *phy_init_seq;
int phy_init_sz;
- void (*vbus_power)(bool on);
- unsigned power_budget;
- enum usb_dr_mode mode;
+ int (*vbus_power)(bool on);
+ unsigned int power_budget;
+ enum usb_mode_type mode;
enum otg_control_type otg_control;
+ enum usb_mode_type default_mode;
enum msm_usb_phy_type phy_type;
- void (*setup_gpio)(enum usb_otg_state state);
+ int pmic_id_irq;
+ unsigned int mpm_otgsessvld_int;
+ unsigned int mpm_dpshv_int;
+ unsigned int mpm_dmshv_int;
+ bool disable_reset_on_disconnect;
+ bool pnoc_errata_fix;
+ bool enable_lpm_on_dev_suspend;
+ bool core_clk_always_on_workaround;
+ bool delay_lpm_on_disconnect;
+ bool dp_manual_pullup;
+ bool enable_sec_phy;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ int log2_itc;
+ bool l1_supported;
+ bool dpdm_pulldown_added;
+ int vddmin_gpio;
+ bool enable_ahb2ahb_bypass;
+ bool disable_retention_with_vdd_min;
+ bool enable_phy_id_pullup;
+ int usb_id_gpio;
+ int hub_reset_gpio;
+ int usbeth_reset_gpio;
+ int switch_sel_gpio;
+ bool phy_dvdd_always_on;
+ bool emulation;
+ bool enable_streaming;
+ bool enable_axi_prefetch;
+ bool enable_sdp_typec_current_limit;
+ bool vbus_low_as_hostmode;
};
+#define USB_CHG_BLOCK_ULPI 1
+
+#define USB_REQUEST_5V 1
+#define USB_REQUEST_9V 2
/**
- * struct msm_usb_cable - structure for exteternal connector cable
- * state tracking
- * @nb: hold event notification callback
- * @conn: used for notification registration
+ * struct msm_usb_chg_info - MSM USB charger block details.
+ * @chg_block_type: The type of charger block. QSCRATCH/ULPI.
+ * @page_offset: USB charger register base may not be aligned to
+ * PAGE_SIZE. The kernel driver aligns the base
+ * address and use it for memory mapping. This
+ * page_offset is used by user space to calculate
+ * the correct charger register base address.
+ * @length: The length of the charger register address space.
*/
-struct msm_usb_cable {
- struct notifier_block nb;
- struct extcon_dev *extcon;
+struct msm_usb_chg_info {
+ uint32_t chg_block_type;
+ __kernel_off_t page_offset;
+ size_t length;
};
-/**
- * struct msm_otg: OTG driver data. Shared by HCD and DCD.
- * @otg: USB OTG Transceiver structure.
- * @pdata: otg device platform data.
- * @irq: IRQ number assigned for HSUSB controller.
- * @clk: clock struct of usb_hs_clk.
- * @pclk: clock struct of usb_hs_pclk.
- * @core_clk: clock struct of usb_hs_core_clk.
- * @regs: ioremapped register base address.
- * @inputs: OTG state machine inputs(Id, SessValid etc).
- * @sm_work: OTG state machine work.
- * @in_lpm: indicates low power mode (LPM) state.
- * @async_int: Async interrupt arrived.
- * @cur_power: The amount of mA available from downstream port.
- * @chg_work: Charger detection work.
- * @chg_state: The state of charger detection process.
- * @chg_type: The type of charger attached.
- * @dcd_retires: The retry count used to track Data contact
- * detection process.
- * @manual_pullup: true if VBUS is not routed to USB controller/phy
- * and controller driver therefore enables pull-up explicitly before
- * starting controller using usbcmd run/stop bit.
- * @vbus: VBUS signal state trakining, using extcon framework
- * @id: ID signal state trakining, using extcon framework
- * @switch_gpio: Descriptor for GPIO used to control external Dual
- * SPDT USB Switch.
- * @reboot: Used to inform the driver to route USB D+/D- line to Device
- * connector
- */
-struct msm_otg {
- struct usb_phy phy;
- struct msm_otg_platform_data *pdata;
- int irq;
- struct clk *clk;
- struct clk *pclk;
- struct clk *core_clk;
- void __iomem *regs;
-#define ID 0
-#define B_SESS_VLD 1
- unsigned long inputs;
- struct work_struct sm_work;
- atomic_t in_lpm;
- int async_int;
- unsigned cur_power;
- int phy_number;
- struct delayed_work chg_work;
- enum usb_chg_state chg_state;
- enum usb_chg_type chg_type;
- u8 dcd_retries;
- struct regulator *v3p3;
- struct regulator *v1p8;
- struct regulator *vddcx;
+/* Get the MSM USB charger block information */
+#define MSM_USB_EXT_CHG_INFO _IOW('M', 0, struct msm_usb_chg_info)
- struct reset_control *phy_rst;
- struct reset_control *link_rst;
- int vdd_levels[3];
+/* Vote against USB hardware low power mode */
+#define MSM_USB_EXT_CHG_BLOCK_LPM _IOW('M', 1, int)
- bool manual_pullup;
+/* To tell kernel about voltage being voted */
+#define MSM_USB_EXT_CHG_VOLTAGE_INFO _IOW('M', 2, int)
- struct msm_usb_cable vbus;
- struct msm_usb_cable id;
+/* To tell kernel about voltage request result */
+#define MSM_USB_EXT_CHG_RESULT _IOW('M', 3, int)
- struct gpio_desc *switch_gpio;
- struct notifier_block reboot;
-};
+/* To tell kernel whether charger connected is external charger or not */
+#define MSM_USB_EXT_CHG_TYPE _IOW('M', 4, int)
#define MSM_USB_BASE (motg->regs)
+#define MSM_USB_PHY_CSR_BASE (motg->phy_csr_regs)
+
#define DRIVER_NAME "msm_otg"
+#define CHG_RECHECK_DELAY (jiffies + msecs_to_jiffies(2000))
#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
-#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
-
#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */
#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */
#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */
@@ -238,43 +283,106 @@
#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */
#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */
-#define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */
+#define USB_PHY_VDD_DIG_VOL_NONE 0 /* uV */
+#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-#define USB_PHY_SUSP_DIG_VOL 500000 /* uV */
-enum vdd_levels {
- VDD_LEVEL_NONE = 0,
- VDD_LEVEL_MIN,
- VDD_LEVEL_MAX,
+#define USB_SUSPEND_DELAY_TIME (500 * HZ/1000) /* 500 msec */
+
+#define USB_DEFAULT_SYSTEM_CLOCK 80000000 /* 80 MHz */
+
+#define PM_QOS_SAMPLE_SEC 2
+#define PM_QOS_THRESHOLD 400
+
+#define MICRO_5V 5000000
+#define MICRO_9V 9000000
+
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+
+enum msm_otg_phy_reg_mode {
+ USB_PHY_REG_OFF,
+ USB_PHY_REG_ON,
+ USB_PHY_REG_LPM_ON,
+ USB_PHY_REG_LPM_OFF,
+ USB_PHY_REG_3P3_ON,
+ USB_PHY_REG_3P3_OFF,
};
-static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
+static char *override_phy_init;
+module_param(override_phy_init, charp, 0644);
+MODULE_PARM_DESC(override_phy_init,
+ "Override HSUSB PHY Init Settings");
+
+unsigned int lpm_disconnect_thresh = 1000;
+module_param(lpm_disconnect_thresh, uint, 0644);
+MODULE_PARM_DESC(lpm_disconnect_thresh,
+ "Delay before entering LPM on USB disconnect");
+
+static bool floated_charger_enable;
+module_param(floated_charger_enable, bool, 0644);
+MODULE_PARM_DESC(floated_charger_enable,
+ "Whether to enable floated charger");
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg_log = 1;
+module_param(enable_dbg_log, uint, 0644);
+MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
+
+/* Max current to be drawn for HVDCP charger */
+static int hvdcp_max_current = IDEV_HVDCP_CHG_MAX;
+module_param(hvdcp_max_current, int, 0644);
+MODULE_PARM_DESC(hvdcp_max_current, "max current drawn for HVDCP charger");
+
+/* Max current to be drawn for DCP charger */
+static int dcp_max_current = IDEV_CHG_MAX;
+module_param(dcp_max_current, int, 0644);
+MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+
+static DECLARE_COMPLETION(pmic_vbus_init);
+static struct msm_otg *the_msm_otg;
+static bool debug_bus_voting_enabled;
+
+static struct regulator *hsusb_3p3;
+static struct regulator *hsusb_1p8;
+static struct regulator *hsusb_vdd;
+static struct regulator *vbus_otg;
+static struct power_supply *psy;
+
+static int vdd_val[VDD_VAL_MAX];
+static u32 bus_freqs[USB_NOC_NUM_VOTE][USB_NUM_BUS_CLOCKS] /*bimc,snoc,pcnoc*/;
+static char bus_clkname[USB_NUM_BUS_CLOCKS][20] = {"bimc_clk", "snoc_clk",
+ "pcnoc_clk"};
+static bool bus_clk_rate_set;
+
+static void dbg_inc(unsigned int *idx)
{
- int ret = 0;
+ *idx = (*idx + 1) & (DEBUG_MAX_MSG-1);
+}
- if (init) {
- ret = regulator_set_voltage(motg->vddcx,
- motg->vdd_levels[VDD_LEVEL_MIN],
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret) {
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- return ret;
- }
+static void
+msm_otg_dbg_log_event(struct usb_phy *phy, char *event, int d1, int d2)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ unsigned long flags;
+ unsigned long long t;
+ unsigned long nanosec;
- ret = regulator_enable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n");
- } else {
- ret = regulator_set_voltage(motg->vddcx, 0,
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret)
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- ret = regulator_disable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to disable hsusb vddcx\n");
- }
+ if (!enable_dbg_log)
+ return;
- return ret;
+ write_lock_irqsave(&motg->dbg_lock, flags);
+ t = cpu_clock(smp_processor_id());
+ nanosec = do_div(t, 1000000000)/1000;
+ scnprintf(motg->buf[motg->dbg_idx], DEBUG_MSG_LEN,
+ "[%5lu.%06lu]: %s :%d:%d",
+ (unsigned long)t, nanosec, event, d1, d2);
+
+ motg->dbg_idx++;
+ motg->dbg_idx = motg->dbg_idx % DEBUG_MAX_MSG;
+ write_unlock_irqrestore(&motg->dbg_lock, flags);
}
static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
@@ -282,65 +390,194 @@
int rc = 0;
if (init) {
- rc = regulator_set_voltage(motg->v3p3, USB_PHY_3P3_VOL_MIN,
+ hsusb_3p3 = devm_regulator_get(motg->phy.dev, "HSUSB_3p3");
+ if (IS_ERR(hsusb_3p3)) {
+ dev_err(motg->phy.dev, "unable to get hsusb 3p3\n");
+ return PTR_ERR(hsusb_3p3);
+ }
+
+ rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN,
USB_PHY_3P3_VOL_MAX);
if (rc) {
- dev_err(motg->phy.dev, "Cannot set v3p3 voltage\n");
- goto exit;
+ dev_err(motg->phy.dev, "unable to set voltage level for hsusb 3p3\n"
+ );
+ return rc;
}
- rc = regulator_enable(motg->v3p3);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 3p3\n");
- goto exit;
+ hsusb_1p8 = devm_regulator_get(motg->phy.dev, "HSUSB_1p8");
+ if (IS_ERR(hsusb_1p8)) {
+ dev_err(motg->phy.dev, "unable to get hsusb 1p8\n");
+ rc = PTR_ERR(hsusb_1p8);
+ goto put_3p3_lpm;
}
- rc = regulator_set_voltage(motg->v1p8, USB_PHY_1P8_VOL_MIN,
+ rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN,
USB_PHY_1P8_VOL_MAX);
if (rc) {
- dev_err(motg->phy.dev, "Cannot set v1p8 voltage\n");
- goto disable_3p3;
- }
- rc = regulator_enable(motg->v1p8);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 1p8\n");
- goto disable_3p3;
+ dev_err(motg->phy.dev, "unable to set voltage level for hsusb 1p8\n"
+ );
+ goto put_1p8;
}
return 0;
}
- regulator_disable(motg->v1p8);
-disable_3p3:
- regulator_disable(motg->v3p3);
-exit:
+put_1p8:
+ regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX);
+put_3p3_lpm:
+ regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX);
return rc;
}
-static int msm_hsusb_ldo_set_mode(struct msm_otg *motg, int on)
+static int msm_hsusb_config_vddcx(int high)
+{
+ struct msm_otg *motg = the_msm_otg;
+ int max_vol = vdd_val[VDD_MAX];
+ int min_vol;
+ int ret;
+
+ min_vol = vdd_val[!!high];
+ ret = regulator_set_voltage(hsusb_vdd, min_vol, max_vol);
+ if (ret) {
+ pr_err("%s: unable to set the voltage for regulator HSUSB_VDDCX\n",
+ __func__);
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+ msm_otg_dbg_log_event(&motg->phy, "CONFIG VDDCX", min_vol, max_vol);
+
+ return ret;
+}
+
+static int msm_hsusb_ldo_enable(struct msm_otg *motg,
+ enum msm_otg_phy_reg_mode mode)
{
int ret = 0;
- if (on) {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v1p8\n");
- return ret;
- }
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v3p3\n");
- regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- return ret;
- }
- } else {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v1p8\n");
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v3p3\n");
+ if (IS_ERR(hsusb_1p8)) {
+ pr_err("%s: HSUSB_1p8 is not initialized\n", __func__);
+ return -ENODEV;
}
- pr_debug("reg (%s)\n", on ? "HPM" : "LPM");
+ if (IS_ERR(hsusb_3p3)) {
+ pr_err("%s: HSUSB_3p3 is not initialized\n", __func__);
+ return -ENODEV;
+ }
+
+ switch (mode) {
+ case USB_PHY_REG_ON:
+ ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set HPM of the regulator HSUSB_1p8\n",
+ __func__);
+ return ret;
+ }
+
+ ret = regulator_enable(hsusb_1p8);
+ if (ret) {
+ dev_err(motg->phy.dev, "%s: unable to enable the hsusb 1p8\n",
+ __func__);
+ regulator_set_load(hsusb_1p8, 0);
+ return ret;
+ }
+
+ /* fall through */
+ case USB_PHY_REG_3P3_ON:
+ ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set HPM of the regulator HSUSB_3p3\n",
+ __func__);
+ if (mode == USB_PHY_REG_ON) {
+ regulator_set_load(hsusb_1p8, 0);
+ regulator_disable(hsusb_1p8);
+ }
+ return ret;
+ }
+
+ ret = regulator_enable(hsusb_3p3);
+ if (ret) {
+ dev_err(motg->phy.dev, "%s: unable to enable the hsusb 3p3\n",
+ __func__);
+ regulator_set_load(hsusb_3p3, 0);
+ if (mode == USB_PHY_REG_ON) {
+ regulator_set_load(hsusb_1p8, 0);
+ regulator_disable(hsusb_1p8);
+ }
+ return ret;
+ }
+
+ break;
+
+ case USB_PHY_REG_OFF:
+ ret = regulator_disable(hsusb_1p8);
+ if (ret) {
+ dev_err(motg->phy.dev, "%s: unable to disable the hsusb 1p8\n",
+ __func__);
+ return ret;
+ }
+
+ ret = regulator_set_load(hsusb_1p8, 0);
+ if (ret < 0)
+ pr_err("%s: Unable to set LPM of the regulator HSUSB_1p8\n",
+ __func__);
+
+ /* fall through */
+ case USB_PHY_REG_3P3_OFF:
+ ret = regulator_disable(hsusb_3p3);
+ if (ret) {
+ dev_err(motg->phy.dev, "%s: unable to disable the hsusb 3p3\n",
+ __func__);
+ return ret;
+ }
+ ret = regulator_set_load(hsusb_3p3, 0);
+ if (ret < 0)
+ pr_err("%s: Unable to set LPM of the regulator HSUSB_3p3\n",
+ __func__);
+
+ break;
+
+ case USB_PHY_REG_LPM_ON:
+ ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_LPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set LPM of the regulator: HSUSB_1p8\n",
+ __func__);
+ return ret;
+ }
+
+ ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_LPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set LPM of the regulator: HSUSB_3p3\n",
+ __func__);
+ regulator_set_load(hsusb_1p8, USB_PHY_REG_ON);
+ return ret;
+ }
+
+ break;
+
+ case USB_PHY_REG_LPM_OFF:
+ ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set HPM of the regulator: HSUSB_1p8\n",
+ __func__);
+ return ret;
+ }
+
+ ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
+ if (ret < 0) {
+ pr_err("%s: Unable to set HPM of the regulator: HSUSB_3p3\n",
+ __func__);
+ regulator_set_load(hsusb_1p8, USB_PHY_REG_ON);
+ return ret;
+ }
+
+ break;
+
+ default:
+ pr_err("%s: Unsupported mode (%d).", __func__, mode);
+ return -ENOTSUPP;
+ }
+
+ pr_debug("%s: USB reg mode (%d) (OFF/HPM/LPM)\n", __func__, mode);
+ msm_otg_dbg_log_event(&motg->phy, "USB REG MODE", mode, ret);
return ret < 0 ? ret : 0;
}
@@ -349,13 +586,22 @@
struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
int cnt = 0;
+ if (motg->pdata->emulation)
+ return 0;
+
+ if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
+ pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
+ __func__, reg);
+ return 0;
+ }
+
/* initiate read operation */
- writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+ writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
USB_ULPI_VIEWPORT);
/* wait for completion */
while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+ if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
break;
udelay(1);
cnt++;
@@ -363,10 +609,12 @@
if (cnt >= ULPI_IO_TIMEOUT_USEC) {
dev_err(phy->dev, "ulpi_read: timeout %08x\n",
- readl(USB_ULPI_VIEWPORT));
+ readl_relaxed(USB_ULPI_VIEWPORT));
+ dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
+ readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
return -ETIMEDOUT;
}
- return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+ return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
}
static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
@@ -374,14 +622,23 @@
struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
int cnt = 0;
+ if (motg->pdata->emulation)
+ return 0;
+
+ if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
+ pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
+ __func__, reg);
+ return 0;
+ }
+
/* initiate write operation */
- writel(ULPI_RUN | ULPI_WRITE |
+ writel_relaxed(ULPI_RUN | ULPI_WRITE |
ULPI_ADDR(reg) | ULPI_DATA(val),
USB_ULPI_VIEWPORT);
/* wait for completion */
while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+ if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
break;
udelay(1);
cnt++;
@@ -389,6 +646,8 @@
if (cnt >= ULPI_IO_TIMEOUT_USEC) {
dev_err(phy->dev, "ulpi_write: timeout\n");
+ dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
+ readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
return -ETIMEDOUT;
}
return 0;
@@ -402,38 +661,71 @@
static void ulpi_init(struct msm_otg *motg)
{
struct msm_otg_platform_data *pdata = motg->pdata;
- int *seq = pdata->phy_init_seq, idx;
- u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
+ int aseq[10];
+ int *seq = NULL;
- for (idx = 0; idx < pdata->phy_init_sz; idx++) {
- if (seq[idx] == -1)
- continue;
+ if (override_phy_init) {
+ pr_debug("%s(): HUSB PHY Init:%s\n", __func__,
+ override_phy_init);
+ get_options(override_phy_init, ARRAY_SIZE(aseq), aseq);
+ seq = &aseq[1];
+ } else {
+ seq = pdata->phy_init_seq;
+ }
+
+ if (!seq)
+ return;
+
+ while (seq[0] >= 0) {
+ if (override_phy_init)
+ pr_debug("ulpi: write 0x%02x to 0x%02x\n",
+ seq[0], seq[1]);
dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
- seq[idx], addr + idx);
- ulpi_write(&motg->phy, seq[idx], addr + idx);
+ seq[0], seq[1]);
+ msm_otg_dbg_log_event(&motg->phy, "ULPI WRITE", seq[0], seq[1]);
+ ulpi_write(&motg->phy, seq[0], seq[1]);
+ seq += 2;
}
}
-static int msm_phy_notify_disconnect(struct usb_phy *phy,
- enum usb_device_speed speed)
+static int msm_otg_phy_clk_reset(struct msm_otg *motg)
{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int val;
+ int ret;
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
- usb_phy_io_write(phy, val, ULPI_CLR(ULPI_MISC_A));
+ if (!motg->phy_reset_clk)
+ return 0;
+
+ if (motg->sleep_clk)
+ clk_disable_unprepare(motg->sleep_clk);
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+
+ ret = reset_control_assert(motg->phy_reset);
+ if (ret) {
+ pr_err("phy_reset_clk assert failed %d\n", ret);
+ return ret;
}
-
/*
- * Put the transceiver in non-driving mode. Otherwise host
- * may not detect soft-disconnection.
+ * As per databook, 10 usec delay is required between
+ * PHY POR assert and de-assert.
*/
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
+ usleep_range(10, 15);
+ ret = reset_control_deassert(motg->phy_reset);
+ if (ret) {
+ pr_err("phy_reset_clk de-assert failed %d\n", ret);
+ return ret;
+ }
+ /*
+ * As per databook, it takes 75 usec for PHY to stabilize
+ * after the reset.
+ */
+ usleep_range(80, 100);
+
+ if (motg->phy_csr_clk)
+ clk_prepare_enable(motg->phy_csr_clk);
+ if (motg->sleep_clk)
+ clk_prepare_enable(motg->sleep_clk);
return 0;
}
@@ -442,40 +734,54 @@
{
int ret;
- if (assert)
- ret = reset_control_assert(motg->link_rst);
- else
- ret = reset_control_deassert(motg->link_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
- assert ? "assert" : "deassert");
-
+ if (assert) {
+ /* Using asynchronous block reset to the hardware */
+ dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ ret = reset_control_assert(motg->core_reset);
+ if (ret)
+ dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
+ } else {
+ dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
+ ret = reset_control_deassert(motg->core_reset);
+ ndelay(200);
+ ret = clk_prepare_enable(motg->core_clk);
+ WARN(ret, "USB core_clk enable failed\n");
+ ret = clk_prepare_enable(motg->pclk);
+ WARN(ret, "USB pclk enable failed\n");
+ if (ret)
+ dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
+ }
return ret;
}
-static int msm_otg_phy_clk_reset(struct msm_otg *motg)
-{
- int ret = 0;
-
- if (motg->phy_rst)
- ret = reset_control_reset(motg->phy_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb phy clk reset failed\n");
-
- return ret;
-}
-
-static int msm_link_reset(struct msm_otg *motg)
+static int msm_otg_phy_reset(struct msm_otg *motg)
{
u32 val;
int ret;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ /*
+ * AHB2AHB Bypass mode shouldn't be enabled before doing
+ * an async clock reset. If it is enabled, disable it.
+ */
+ val = readl_relaxed(USB_AHBMODE);
+ if (val & AHB2AHB_BYPASS) {
+ pr_err("%s(): AHB2AHB_BYPASS SET: AHBMODE:%x\n",
+ __func__, val);
+ val &= ~AHB2AHB_BYPASS_BIT_MASK;
+ writel_relaxed(val | AHB2AHB_BYPASS_CLEAR, USB_AHBMODE);
+ pr_err("%s(): AHBMODE: %x\n", __func__,
+ readl_relaxed(USB_AHBMODE));
+ }
ret = msm_otg_link_clk_reset(motg, 1);
if (ret)
return ret;
+ msm_otg_phy_clk_reset(motg);
+
/* wait for 1ms delay as suggested in HPG. */
usleep_range(1000, 1200);
@@ -483,24 +789,27 @@
if (ret)
return ret;
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
+ val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
+ writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
- /* put transceiver in serial mode as part of reset */
- val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
- writel(val | PORTSC_PTS_SERIAL, USB_PORTSC);
-
+ dev_info(motg->phy.dev, "phy_reset: success\n");
+ msm_otg_dbg_log_event(&motg->phy, "PHY RESET SUCCESS",
+ motg->inputs, motg->phy.otg->state);
return 0;
}
-static int msm_otg_reset(struct usb_phy *phy)
+#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
+static int msm_otg_link_reset(struct msm_otg *motg)
{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
int cnt = 0;
+ struct msm_otg_platform_data *pdata = motg->pdata;
- writel(USBCMD_RESET, USB_USBCMD);
+ writel_relaxed(USBCMD_RESET, USB_USBCMD);
while (cnt < LINK_RESET_TIMEOUT_USEC) {
- if (!(readl(USB_USBCMD) & USBCMD_RESET))
+ if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
break;
udelay(1);
cnt++;
@@ -508,59 +817,173 @@
if (cnt >= LINK_RESET_TIMEOUT_USEC)
return -ETIMEDOUT;
- /* select ULPI phy and clear other status/control bits in PORTSC */
- writel(PORTSC_PTS_ULPI, USB_PORTSC);
+ /* select ULPI phy */
+ writel_relaxed(0x80000000, USB_PORTSC);
+ writel_relaxed(0x0, USB_AHBBURST);
+ writel_relaxed(0x08, USB_AHBMODE);
- writel(0x0, USB_AHBBURST);
- writel(0x08, USB_AHBMODE);
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+ if (pdata && pdata->enable_sec_phy)
+ writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
+ USB_PHY_CTRL2);
return 0;
}
-static void msm_phy_reset(struct msm_otg *motg)
+#define QUSB2PHY_PORT_POWERDOWN 0xB4
+#define QUSB2PHY_PORT_UTMI_CTRL2 0xC4
+
+static void msm_usb_phy_reset(struct msm_otg *motg)
{
- void __iomem *addr;
+ u32 val;
+ int ret, *seq;
- if (motg->pdata->phy_type != SNPS_28NM_INTEGRATED_PHY) {
- msm_otg_phy_clk_reset(motg);
- return;
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ /* Assert USB PHY_PON */
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ val &= ~PHY_POR_BIT_MASK;
+ val |= PHY_POR_ASSERT;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+
+ /*
+ * Wait for a minimum of 10 microseconds as suggested in HPG.
+ */
+ usleep_range(10, 15);
+
+ /* Deassert USB PHY_PON */
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ val &= ~PHY_POR_BIT_MASK;
+ val |= PHY_POR_DEASSERT;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+ break;
+ case QUSB_ULPI_PHY:
+ ret = reset_control_assert(motg->phy_reset);
+ if (ret) {
+ pr_err("phy_reset_clk assert failed %d\n", ret);
+ break;
+ }
+
+ /* need to delay 10us for PHY to reset */
+ usleep_range(10, 20);
+
+ ret = reset_control_deassert(motg->phy_reset);
+ if (ret) {
+ pr_err("phy_reset_clk de-assert failed %d\n", ret);
+ break;
+ }
+
+ /* Ensure that RESET operation is completed. */
+ mb();
+
+ writel_relaxed(0x23,
+ motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
+ writel_relaxed(0x0,
+ motg->phy_csr_regs + QUSB2PHY_PORT_UTMI_CTRL2);
+
+ /* Program tuning parameters for PHY */
+ seq = motg->pdata->phy_init_seq;
+ if (seq) {
+ while (seq[0] >= 0) {
+ writel_relaxed(seq[1],
+ motg->phy_csr_regs + seq[0]);
+ seq += 2;
+ }
+ }
+
+ /* ensure above writes are completed before re-enabling PHY */
+ wmb();
+ writel_relaxed(0x22,
+ motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
+ break;
+ case SNPS_FEMTO_PHY:
+ if (!motg->phy_por_clk) {
+ pr_err("phy_por_clk missing\n");
+ break;
+ }
+ ret = reset_control_assert(motg->phy_por_reset);
+ if (ret) {
+ pr_err("phy_por_clk assert failed %d\n", ret);
+ break;
+ }
+ /*
+ * The Femto PHY is POR reset in the following scenarios.
+ *
+ * 1. After overriding the parameter registers.
+ * 2. Low power mode exit from PHY retention.
+ *
+ * Ensure that SIDDQ is cleared before bringing the PHY
+ * out of reset.
+ *
+ */
+
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
+ val &= ~SIDDQ;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
+
+ /*
+ * As per databook, 10 usec delay is required between
+ * PHY POR assert and de-assert.
+ */
+ usleep_range(10, 20);
+ ret = reset_control_deassert(motg->phy_por_reset);
+ if (ret) {
+ pr_err("phy_por_clk de-assert failed %d\n", ret);
+ break;
+ }
+ /*
+ * As per databook, it takes 75 usec for PHY to stabilize
+ * after the reset.
+ */
+ usleep_range(80, 100);
+ break;
+ default:
+ break;
}
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- /* Assert USB PHY_POR */
- writel(readl(addr) | PHY_POR_ASSERT, addr);
-
- /*
- * wait for minimum 10 microseconds as suggested in HPG.
- * Use a slightly larger value since the exact value didn't
- * work 100% of the time.
- */
- udelay(12);
-
- /* Deassert USB PHY_POR */
- writel(readl(addr) & ~PHY_POR_ASSERT, addr);
+ /* Ensure that RESET operation is completed. */
+ mb();
}
-static int msm_usb_reset(struct usb_phy *phy)
+static int msm_otg_reset(struct usb_phy *phy)
{
struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
int ret;
+ u32 val = 0;
+ u32 ulpi_val = 0;
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
+ msm_otg_dbg_log_event(&motg->phy, "USB RESET", phy->otg->state,
+ get_pm_runtime_counter(phy->dev));
+ /*
+ * USB PHY and Link reset also reset the USB BAM.
+ * Thus perform reset operation only once to avoid
+ * USB BAM reset on other cases e.g. USB cable disconnections.
+ * If hardware reported error then it must be reset for recovery.
+ */
+ if (motg->err_event_seen)
+ dev_info(phy->dev, "performing USB h/w reset for recovery\n");
+ else if (pdata->disable_reset_on_disconnect && motg->reset_counter)
+ return 0;
- ret = msm_link_reset(motg);
+ motg->reset_counter++;
+
+ disable_irq(motg->irq);
+ if (motg->phy_irq)
+ disable_irq(motg->phy_irq);
+
+ ret = msm_otg_phy_reset(motg);
if (ret) {
dev_err(phy->dev, "phy_reset failed\n");
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+
+ enable_irq(motg->irq);
return ret;
}
- ret = msm_otg_reset(&motg->phy);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+
+ enable_irq(motg->irq);
+ ret = msm_otg_link_reset(motg);
if (ret) {
dev_err(phy->dev, "link reset failed\n");
return ret;
@@ -569,148 +992,565 @@
msleep(100);
/* Reset USB PHY after performing USB Link RESET */
- msm_phy_reset(motg);
-
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- return 0;
-}
-
-static int msm_phy_init(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
- u32 val, ulpi_val = 0;
+ msm_usb_phy_reset(motg);
/* Program USB PHY Override registers. */
ulpi_init(motg);
/*
- * It is recommended in HPG to reset USB PHY after programming
- * USB PHY Override registers.
+ * It is required to reset USB PHY after programming
+ * the USB PHY Override registers so that the new
+ * values take effect.
*/
- msm_phy_reset(motg);
+ msm_usb_phy_reset(motg);
if (pdata->otg_control == OTG_PHY_CONTROL) {
- val = readl(USB_OTGSC);
- if (pdata->mode == USB_DR_MODE_OTG) {
+ val = readl_relaxed(USB_OTGSC);
+ if (pdata->mode == USB_OTG) {
ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
val |= OTGSC_IDIE | OTGSC_BSVIE;
- } else if (pdata->mode == USB_DR_MODE_PERIPHERAL) {
+ } else if (pdata->mode == USB_PERIPHERAL) {
ulpi_val = ULPI_INT_SESS_VALID;
val |= OTGSC_BSVIE;
}
- writel(val, USB_OTGSC);
+ writel_relaxed(val, USB_OTGSC);
ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
+ } else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+ ulpi_write(phy, OTG_COMP_DISABLE,
+ ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+ if (motg->phy_irq)
+ writeb_relaxed(USB_PHY_ID_MASK,
+ USB2_PHY_USB_PHY_INTERRUPT_MASK1);
}
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
- ulpi_write(phy, val, ULPI_SET(ULPI_MISC_A));
+ if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)
+ writel_relaxed(readl_relaxed(USB_OTGSC) & ~(OTGSC_IDPU),
+ USB_OTGSC);
- val = readl(USB_GENCONFIG_2);
- val |= GENCONFIG_2_SESS_VLD_CTRL_EN;
- writel(val, USB_GENCONFIG_2);
+ msm_otg_dbg_log_event(&motg->phy, "USB RESET DONE", phy->otg->state,
+ get_pm_runtime_counter(phy->dev));
- val = readl(USB_USBCMD);
- val |= USBCMD_SESS_VLD_CTRL;
- writel(val, USB_USBCMD);
+ if (pdata->enable_axi_prefetch)
+ writel_relaxed(readl_relaxed(USB_HS_APF_CTRL) | (APF_CTRL_EN),
+ USB_HS_APF_CTRL);
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
- }
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
+ /*
+ * Disable USB BAM as block reset resets USB BAM registers.
+ */
+ msm_usb_bam_enable(CI_CTRL, false);
return 0;
}
-#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
-#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-
-#ifdef CONFIG_PM
-
-static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high)
+static void msm_otg_kick_sm_work(struct msm_otg *motg)
{
- int max_vol = motg->vdd_levels[VDD_LEVEL_MAX];
- int min_vol;
- int ret;
+ if (atomic_read(&motg->in_lpm))
+ motg->resume_pending = true;
- if (high)
- min_vol = motg->vdd_levels[VDD_LEVEL_MIN];
- else
- min_vol = motg->vdd_levels[VDD_LEVEL_NONE];
-
- ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("Cannot set vddcx voltage\n");
- return ret;
+ /* For device mode, resume now. Let pm_resume handle other cases */
+ if (atomic_read(&motg->pm_suspended) &&
+ motg->phy.otg->state != OTG_STATE_B_SUSPEND) {
+ motg->sm_work_pending = true;
+ } else if (!motg->sm_work_pending) {
+ /* process event only if previous one is not pending */
+ queue_work(motg->otg_wq, &motg->sm_work);
}
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
}
+/*
+ * UDC calls usb_phy_set_suspend() to notify during bus suspend/resume.
+ * Update relevant state-machine inputs and queue sm_work.
+ * LPM enter/exit doesn't happen directly from this routine.
+ */
+
+static int msm_otg_set_suspend(struct usb_phy *phy, int suspend)
+{
+ struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+
+ pr_debug("%s(%d) in %s state\n", __func__, suspend,
+ usb_otg_state_string(phy->otg->state));
+ msm_otg_dbg_log_event(phy, "SET SUSPEND", suspend, phy->otg->state);
+
+ if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+ return 0;
+
+ if (suspend) {
+ /* called in suspend interrupt context */
+ pr_debug("peripheral bus suspend\n");
+ msm_otg_dbg_log_event(phy, "PERIPHERAL BUS SUSPEND",
+ motg->inputs, phy->otg->state);
+
+ set_bit(A_BUS_SUSPEND, &motg->inputs);
+ } else {
+ /* host resume or remote-wakeup */
+ pr_debug("peripheral bus resume\n");
+ msm_otg_dbg_log_event(phy, "PERIPHERAL BUS RESUME",
+ motg->inputs, phy->otg->state);
+
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ }
+ /* use kick_sm_work to handle race with pm_resume */
+ msm_otg_kick_sm_work(motg);
+
+ return 0;
+}
+
+static int msm_otg_bus_freq_set(struct msm_otg *motg, enum usb_noc_mode mode)
+{
+ int i, ret;
+ long rate;
+
+ for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+ rate = bus_freqs[mode][i];
+ if (!rate) {
+ pr_debug("%s rate not available\n", bus_clkname[i]);
+ continue;
+ }
+
+ ret = clk_set_rate(motg->bus_clks[i], rate);
+ if (ret) {
+ pr_err("%s set rate failed: %d\n", bus_clkname[i], ret);
+ return ret;
+ }
+ pr_debug("%s set to %lu Hz\n", bus_clkname[i],
+ clk_get_rate(motg->bus_clks[i]));
+ msm_otg_dbg_log_event(&motg->phy, "OTG BUS FREQ SET", i, rate);
+ }
+
+ bus_clk_rate_set = true;
+
+ return 0;
+}
+
+static int msm_otg_bus_freq_get(struct msm_otg *motg)
+{
+ struct device *dev = motg->phy.dev;
+ struct device_node *np = dev->of_node;
+ int len = 0, i, count = USB_NUM_BUS_CLOCKS;
+
+ if (!np)
+ return -EINVAL;
+
+ of_find_property(np, "qcom,bus-clk-rate", &len);
+ /* SVS requires extra set of frequencies for perf_mode sysfs node */
+ if (motg->default_noc_mode == USB_NOC_SVS_VOTE)
+ count *= 2;
+
+ if (!len || (len / sizeof(u32) != count)) {
+ pr_err("Invalid bus rate:%d %u\n", len, motg->default_noc_mode);
+ return -EINVAL;
+ }
+ of_property_read_u32_array(np, "qcom,bus-clk-rate", bus_freqs[0],
+ count);
+ for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+ if (bus_freqs[0][i] == 0) {
+ motg->bus_clks[i] = NULL;
+ pr_debug("%s not available\n", bus_clkname[i]);
+ continue;
+ }
+
+ motg->bus_clks[i] = devm_clk_get(dev, bus_clkname[i]);
+ if (IS_ERR(motg->bus_clks[i])) {
+ pr_err("%s get failed\n", bus_clkname[i]);
+ return PTR_ERR(motg->bus_clks[i]);
+ }
+ }
+ return 0;
+}
+
+static void msm_otg_bus_clks_enable(struct msm_otg *motg)
+{
+ int i;
+ int ret;
+
+ if (!bus_clk_rate_set || motg->bus_clks_enabled)
+ return;
+
+ for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+ if (motg->bus_clks[i] == NULL)
+ continue;
+ ret = clk_prepare_enable(motg->bus_clks[i]);
+ if (ret) {
+ pr_err("%s enable rate failed: %d\n", bus_clkname[i],
+ ret);
+ goto err_clk_en;
+ }
+ }
+ motg->bus_clks_enabled = true;
+ return;
+err_clk_en:
+ for (--i; i >= 0; --i) {
+ if (motg->bus_clks[i] != NULL)
+ clk_disable_unprepare(motg->bus_clks[i]);
+ }
+}
+
+static void msm_otg_bus_clks_disable(struct msm_otg *motg)
+{
+ int i;
+
+ if (!bus_clk_rate_set || !motg->bus_clks_enabled)
+ return;
+
+ for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
+ if (motg->bus_clks[i] != NULL)
+ clk_disable_unprepare(motg->bus_clks[i]);
+ }
+ motg->bus_clks_enabled = false;
+}
+
+static void msm_otg_bus_vote(struct msm_otg *motg, enum usb_bus_vote vote)
+{
+ int ret;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ msm_otg_dbg_log_event(&motg->phy, "BUS VOTE", vote,
+ motg->phy.otg->state);
+ /* Check if target allows min_vote to be same as no_vote */
+ if (pdata->bus_scale_table &&
+ vote >= pdata->bus_scale_table->num_usecases)
+ vote = USB_NO_PERF_VOTE;
+
+ if (motg->bus_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ motg->bus_perf_client, vote);
+ if (ret)
+ dev_err(motg->phy.dev, "%s: Failed to vote (%d)\n"
+ "for bus bw %d\n", __func__, vote, ret);
+ }
+
+ if (vote == USB_MAX_PERF_VOTE)
+ msm_otg_bus_clks_enable(motg);
+ else
+ msm_otg_bus_clks_disable(motg);
+}
+
+static void msm_otg_enable_phy_hv_int(struct msm_otg *motg)
+{
+ bool bsv_id_hv_int = false;
+ bool dp_dm_hv_int = false;
+ u32 val;
+
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
+ motg->phy_irq)
+ bsv_id_hv_int = true;
+ if (motg->host_bus_suspend || motg->device_bus_suspend)
+ dp_dm_hv_int = true;
+
+ if (!bsv_id_hv_int && !dp_dm_hv_int)
+ return;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ if (bsv_id_hv_int)
+ val |= (PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
+ if (dp_dm_hv_int)
+ val |= PHY_CLAMP_DPDMSE_EN;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+ break;
+ case SNPS_FEMTO_PHY:
+ if (bsv_id_hv_int) {
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
+ val |= ID_HV_CLAMP_EN_N;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
+ }
+
+ if (dp_dm_hv_int) {
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
+ val |= CLAMP_MPM_DPSE_DMSE_EN_N;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
+ }
+ break;
+ default:
+ break;
+ }
+ pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
+ __func__, bsv_id_hv_int, dp_dm_hv_int);
+ msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR ENABLED",
+ bsv_id_hv_int, dp_dm_hv_int);
+}
+
+static void msm_otg_disable_phy_hv_int(struct msm_otg *motg)
+{
+ bool bsv_id_hv_int = false;
+ bool dp_dm_hv_int = false;
+ u32 val;
+
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
+ motg->phy_irq)
+ bsv_id_hv_int = true;
+ if (motg->host_bus_suspend || motg->device_bus_suspend)
+ dp_dm_hv_int = true;
+
+ if (!bsv_id_hv_int && !dp_dm_hv_int)
+ return;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ if (bsv_id_hv_int)
+ val &= ~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
+ if (dp_dm_hv_int)
+ val &= ~PHY_CLAMP_DPDMSE_EN;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+ break;
+ case SNPS_FEMTO_PHY:
+ if (bsv_id_hv_int) {
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
+ val &= ~ID_HV_CLAMP_EN_N;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
+ }
+
+ if (dp_dm_hv_int) {
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
+ val &= ~CLAMP_MPM_DPSE_DMSE_EN_N;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
+ }
+ break;
+ default:
+ break;
+ }
+ pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
+ __func__, bsv_id_hv_int, dp_dm_hv_int);
+ msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR DISABLED",
+ bsv_id_hv_int, dp_dm_hv_int);
+}
+
+static void msm_otg_enter_phy_retention(struct msm_otg *motg)
+{
+ u32 val;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ val &= ~PHY_RETEN;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+ break;
+ case SNPS_FEMTO_PHY:
+ /* Retention is supported via SIDDQ */
+ val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
+ val |= SIDDQ;
+ writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
+ break;
+ default:
+ break;
+ }
+ pr_debug("USB PHY is in retention\n");
+ msm_otg_dbg_log_event(&motg->phy, "USB PHY ENTER RETENTION",
+ motg->pdata->phy_type, 0);
+}
+
+static void msm_otg_exit_phy_retention(struct msm_otg *motg)
+{
+ int val;
+
+ switch (motg->pdata->phy_type) {
+ case SNPS_PICO_PHY:
+ val = readl_relaxed(motg->usb_phy_ctrl_reg);
+ val |= PHY_RETEN;
+ writel_relaxed(val, motg->usb_phy_ctrl_reg);
+ break;
+ case SNPS_FEMTO_PHY:
+ /*
+ * It is required to do USB block reset to bring Femto PHY out
+ * of retention.
+ */
+ msm_otg_reset(&motg->phy);
+ break;
+ default:
+ break;
+ }
+ pr_debug("USB PHY is exited from retention\n");
+ msm_otg_dbg_log_event(&motg->phy, "USB PHY EXIT RETENTION",
+ motg->pdata->phy_type, 0);
+}
+
+static void msm_id_status_w(struct work_struct *w);
+static irqreturn_t msm_otg_phy_irq_handler(int irq, void *data)
+{
+ struct msm_otg *motg = data;
+
+ msm_otg_dbg_log_event(&motg->phy, "PHY ID IRQ",
+ atomic_read(&motg->in_lpm), motg->phy.otg->state);
+ if (atomic_read(&motg->in_lpm)) {
+ pr_debug("PHY ID IRQ in LPM\n");
+ motg->phy_irq_pending = true;
+ msm_otg_kick_sm_work(motg);
+ } else {
+ pr_debug("PHY ID IRQ outside LPM\n");
+ msm_id_status_w(&motg->id_status_work.work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define PHY_SUSPEND_TIMEOUT_USEC (5 * 1000)
+#define PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC 100
+#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
+
+#define PHY_SUSPEND_RETRIES_MAX 3
+
+static void msm_otg_set_vbus_state(int online);
+static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode);
+
+#ifdef CONFIG_PM_SLEEP
static int msm_otg_suspend(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
struct usb_bus *bus = phy->otg->host;
struct msm_otg_platform_data *pdata = motg->pdata;
- void __iomem *addr;
- int cnt = 0;
+ int cnt;
+ bool host_bus_suspend, device_bus_suspend, dcp, prop_charger;
+ bool floated_charger, sm_work_busy;
+ u32 cmd_val;
+ u32 portsc, config2;
+ u32 func_ctrl;
+ int phcd_retry_cnt = 0, ret;
+ unsigned int phy_suspend_timeout;
+
+ cnt = 0;
+ msm_otg_dbg_log_event(phy, "LPM ENTER START",
+ motg->inputs, phy->otg->state);
if (atomic_read(&motg->in_lpm))
return 0;
+ cancel_delayed_work_sync(&motg->perf_vote_work);
+
disable_irq(motg->irq);
+ if (motg->phy_irq)
+ disable_irq(motg->phy_irq);
+lpm_start:
+ host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
+ device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
+
+ if (host_bus_suspend)
+ msm_otg_perf_vote_update(motg, false);
/*
- * Chipidea 45-nm PHY suspend sequence:
- *
- * Interrupt Latch Register auto-clear feature is not present
- * in all PHY versions. Latch register is clear on read type.
- * Clear latch register to avoid spurious wakeup from
- * low power mode (LPM).
- *
- * PHY comparators are disabled when PHY enters into low power
- * mode (LPM). Keep PHY comparators ON in LPM only when we expect
- * VBUS/Id notifications from USB PHY. Otherwise turn off USB
- * PHY comparators. This save significant amount of power.
- *
- * PLL is not turned off when PHY enters into low power mode (LPM).
- * Disable PLL for maximum power savings.
+ * Allow putting PHY into SIDDQ with wall charger connected in
+ * case of external charger detection.
*/
+ dcp = (motg->chg_type == USB_DCP_CHARGER) && !motg->is_ext_chg_dcp;
+ prop_charger = motg->chg_type == USB_NONCOMPLIANT_CHARGER;
+ floated_charger = motg->chg_type == USB_FLOATED_CHARGER;
- if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
- ulpi_read(phy, 0x14);
- if (pdata->otg_control == OTG_PHY_CONTROL)
- ulpi_write(phy, 0x01, 0x30);
- ulpi_write(phy, 0x08, 0x09);
- }
+ /* !BSV, but its handling is in progress by otg sm_work */
+ sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
+ phy->otg->state == OTG_STATE_B_PERIPHERAL;
- /*
- * PHY may take some time or even fail to enter into low power
- * mode (LPM). Hence poll for 500 msec and reset the PHY and link
- * in failure case.
- */
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
- dev_err(phy->dev, "Unable to suspend PHY\n");
+ /* Perform block reset to recover from UDC error events on disconnect */
+ if (motg->err_event_seen)
msm_otg_reset(phy);
+
+ /* Enable line state difference wakeup fix for only device and host
+ * bus suspend scenarios. Otherwise PHY can not be suspended when
+ * a charger that pulls DP/DM high is connected.
+ */
+ config2 = readl_relaxed(USB_GENCONFIG_2);
+ if (device_bus_suspend)
+ config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
+ else
+ config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
+ writel_relaxed(config2, USB_GENCONFIG_2);
+
+ /*
+ * Abort suspend when,
+ * 1. charging detection in progress due to cable plug-in
+ * 2. host mode activation in progress due to Micro-A cable insertion
+ * 3. !BSV, but its handling is in progress by otg sm_work
+ * Don't abort suspend in case of dcp detected by PMIC
+ */
+
+ if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
+ !dcp && !motg->is_ext_chg_dcp && !prop_charger &&
+ !floated_charger) || sm_work_busy) {
+ msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
+ motg->inputs, motg->chg_type);
enable_irq(motg->irq);
- return -ETIMEDOUT;
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+ return -EBUSY;
+ }
+
+ if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
+ /* put the controller in non-driving mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+ ulpi_write(phy, ULPI_IFC_CTRL_AUTORESUME,
+ ULPI_CLR(ULPI_IFC_CTRL));
+ }
+
+ /*
+ * PHY suspend sequence as mentioned in the databook.
+ *
+ * Device bus suspend: The controller may abort PHY suspend if
+ * there is an incoming reset or resume from the host. If PHCD
+ * is not set within 100 usec. Abort the LPM sequence.
+ *
+ * Host bus suspend: If the peripheral is attached, PHY is already
+ * put into suspend along with the peripheral bus suspend. poll for
+ * PHCD upto 5 msec. If the peripheral is not attached i.e entering
+ * LPM with Micro-A cable, set the PHCD and poll for it for 5 msec.
+ *
+ * No cable connected: Set the PHCD to suspend the PHY. Poll for PHCD
+ * upto 5 msec.
+ *
+ * The controller aborts PHY suspend only in device bus suspend case.
+ * In other cases, it is observed that PHCD may not get set within
+ * the timeout. If so, set the PHCD again and poll for it before
+ * reset recovery.
+ */
+
+phcd_retry:
+ if (device_bus_suspend)
+ phy_suspend_timeout = PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC;
+ else
+ phy_suspend_timeout = PHY_SUSPEND_TIMEOUT_USEC;
+
+ cnt = 0;
+ portsc = readl_relaxed(USB_PORTSC);
+ if (!(portsc & PORTSC_PHCD)) {
+ writel_relaxed(portsc | PORTSC_PHCD,
+ USB_PORTSC);
+ while (cnt < phy_suspend_timeout) {
+ if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
+ break;
+ udelay(1);
+ cnt++;
+ }
+ }
+
+ if (cnt >= phy_suspend_timeout) {
+ if (phcd_retry_cnt > PHY_SUSPEND_RETRIES_MAX) {
+ msm_otg_dbg_log_event(phy, "PHY SUSPEND FAILED",
+ phcd_retry_cnt, phy->otg->state);
+ dev_err(phy->dev, "PHY suspend failed\n");
+ ret = -EBUSY;
+ goto phy_suspend_fail;
+ }
+
+ if (device_bus_suspend) {
+ dev_dbg(phy->dev, "PHY suspend aborted\n");
+ ret = -EBUSY;
+ goto phy_suspend_fail;
+ } else {
+ if (phcd_retry_cnt++ < PHY_SUSPEND_RETRIES_MAX) {
+ dev_dbg(phy->dev, "PHY suspend retry\n");
+ goto phcd_retry;
+ } else {
+ dev_err(phy->dev, "reset attempt during PHY suspend\n");
+ phcd_retry_cnt++;
+ motg->reset_counter = 0;
+ msm_otg_reset(phy);
+ goto lpm_start;
+ }
+ }
}
/*
@@ -719,84 +1559,284 @@
* line must be disabled till async interrupt enable bit is cleared
* in USBCMD register. Assert STP (ULPI interface STOP signal) to
* block data communication from PHY.
+ *
+ * PHY retention mode is disallowed while entering to LPM with wall
+ * charger connected. But PHY is put into suspend mode. Hence
+ * enable asynchronous interrupt to detect charger disconnection when
+ * PMIC notifications are unavailable.
*/
- writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
+ cmd_val = readl_relaxed(USB_USBCMD);
+ if (host_bus_suspend || device_bus_suspend ||
+ (motg->pdata->otg_control == OTG_PHY_CONTROL))
+ cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
+ else
+ cmd_val |= ULPI_STP_CTRL;
+ writel_relaxed(cmd_val, USB_USBCMD);
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
+ /*
+ * BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
+ * PHY retention and collapse can not happen with VDP_SRC enabled.
+ */
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL)
- writel(readl(addr) | PHY_RETEN, addr);
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
+ /*
+ * We come here in 3 scenarios.
+ *
+ * (1) No cable connected (out of session):
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - PHY is put in retention.
+ * - If allowed (PMIC based detection), PHY is power collapsed.
+ * - DVDD (CX/MX) minimization and XO shutdown are allowed.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
+ * (2) USB wall charger:
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - For BC1.2 compliant charger, retention is not allowed to
+ * keep VDP_SRC on. XO shutdown is allowed.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
+ * (3) Device/Host Bus suspend (if LPM is enabled):
+ * - BSV/ID HV interrupts are enabled for PHY based detection.
+ * - D+/D- MPM pin are configured to wakeup from line state
+ * change through PHY HV interrupts. PHY HV interrupts are
+ * also enabled. If MPM pins are not available, retention and
+ * XO is not allowed.
+ * - PHY is put into retention only if a gpio is used to keep
+ * the D+ pull-up. ALLOW_BUS_SUSPEND_WITHOUT_REWORK capability
+ * is set means, PHY can enable D+ pull-up or D+/D- pull-down
+ * without any re-work and PHY should not be put into retention.
+ * - DVDD (CX/MX) minimization and XO shutdown is allowed if
+ * ALLOW_BUS_SUSPEND_WITHOUT_REWORK is set (PHY DVDD is supplied
+ * via PMIC LDO) or board level re-work is present.
+ * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user
+ * or USB link asynchronous interrupt for line state change.
+ *
+ */
+ motg->host_bus_suspend = host_bus_suspend;
+ motg->device_bus_suspend = device_bus_suspend;
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
- msm_hsusb_ldo_set_mode(motg, 0);
- msm_hsusb_config_vddcx(motg, 0);
+ if (motg->caps & ALLOW_PHY_RETENTION && !device_bus_suspend && !dcp &&
+ (!host_bus_suspend || (motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
+ ((motg->caps & ALLOW_HOST_PHY_RETENTION)
+ && (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS))))) {
+ msm_otg_enable_phy_hv_int(motg);
+ if ((!host_bus_suspend || !(motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK)) &&
+ !(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ msm_otg_enter_phy_retention(motg);
+ motg->lpm_flags |= PHY_RETENTIONED;
+ }
+ } else if (device_bus_suspend && !dcp &&
+ (pdata->mpm_dpshv_int || pdata->mpm_dmshv_int)) {
+ /* DP DM HV interrupts are used for bus resume from XO off */
+ msm_otg_enable_phy_hv_int(motg);
+ if (motg->caps & ALLOW_PHY_RETENTION && pdata->vddmin_gpio) {
+
+ /*
+ * This is HW WA needed when PHY_CLAMP_DPDMSE_EN is
+ * enabled and we put the phy in retention mode.
+ * Without this WA, the async_irq will be fired right
+ * after suspending whithout any bus resume.
+ */
+ config2 = readl_relaxed(USB_GENCONFIG_2);
+ config2 &= ~GENCONFIG_2_DPSE_DMSE_HV_INTR_EN;
+ writel_relaxed(config2, USB_GENCONFIG_2);
+
+ msm_otg_enter_phy_retention(motg);
+ motg->lpm_flags |= PHY_RETENTIONED;
+ gpio_direction_output(pdata->vddmin_gpio, 1);
+ }
}
- if (device_may_wakeup(phy->dev))
- enable_irq_wake(motg->irq);
+ /* Ensure that above operation is completed before turning off clocks */
+ mb();
+ /* Consider clocks on workaround flag only in case of bus suspend */
+ if (!(phy->otg->state == OTG_STATE_B_PERIPHERAL &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs)) ||
+ !motg->pdata->core_clk_always_on_workaround) {
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+ motg->lpm_flags |= CLOCKS_DOWN;
+ }
+
+ /* usb phy no more require TCXO clock, hence vote for TCXO disable */
+ if (!host_bus_suspend || (motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
+ ((motg->caps & ALLOW_HOST_PHY_RETENTION) &&
+ (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS)))) {
+ if (motg->xo_clk) {
+ clk_disable_unprepare(motg->xo_clk);
+ motg->lpm_flags |= XO_SHUTDOWN;
+ }
+ }
+
+ if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
+ !host_bus_suspend && !dcp && !device_bus_suspend) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+ motg->lpm_flags |= PHY_PWR_COLLAPSED;
+ } else if (motg->caps & ALLOW_PHY_REGULATORS_LPM &&
+ !host_bus_suspend && !device_bus_suspend && !dcp) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_ON);
+ motg->lpm_flags |= PHY_REGULATORS_LPM;
+ }
+
+ if (motg->lpm_flags & PHY_RETENTIONED ||
+ (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ regulator_disable(hsusb_vdd);
+ msm_hsusb_config_vddcx(0);
+ }
+
+ if (device_may_wakeup(phy->dev)) {
+ if (host_bus_suspend || device_bus_suspend) {
+ enable_irq_wake(motg->async_irq);
+ enable_irq_wake(motg->irq);
+ }
+
+ if (motg->phy_irq)
+ enable_irq_wake(motg->phy_irq);
+ if (motg->pdata->pmic_id_irq)
+ enable_irq_wake(motg->pdata->pmic_id_irq);
+ if (motg->ext_id_irq)
+ enable_irq_wake(motg->ext_id_irq);
+ if (pdata->otg_control == OTG_PHY_CONTROL &&
+ pdata->mpm_otgsessvld_int)
+ msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 1);
+ if ((host_bus_suspend || device_bus_suspend) &&
+ pdata->mpm_dpshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 1);
+ if ((host_bus_suspend || device_bus_suspend) &&
+ pdata->mpm_dmshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 1);
+ }
if (bus)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
- atomic_set(&motg->in_lpm, 1);
- enable_irq(motg->irq);
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+ atomic_set(&motg->in_lpm, 1);
+
+ /* Enable ASYNC IRQ during LPM */
+ enable_irq(motg->async_irq);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+
+ enable_irq(motg->irq);
+ pm_relax(&motg->pdev->dev);
+
+ dev_dbg(phy->dev, "LPM caps = %lu flags = %lu\n",
+ motg->caps, motg->lpm_flags);
dev_info(phy->dev, "USB in low power mode\n");
+ msm_otg_dbg_log_event(phy, "LPM ENTER DONE",
+ motg->caps, motg->lpm_flags);
+
+ if (motg->err_event_seen) {
+ motg->err_event_seen = false;
+ if (motg->vbus_state != test_bit(B_SESS_VLD, &motg->inputs))
+ msm_otg_set_vbus_state(motg->vbus_state);
+ if (motg->id_state != test_bit(ID, &motg->inputs))
+ msm_id_status_w(&motg->id_status_work.work);
+ }
return 0;
+
+phy_suspend_fail:
+ enable_irq(motg->irq);
+ if (motg->phy_irq)
+ enable_irq(motg->phy_irq);
+ return ret;
}
static int msm_otg_resume(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
struct usb_bus *bus = phy->otg->host;
- void __iomem *addr;
+ struct usb_hcd *hcd = bus_to_hcd(phy->otg->host);
+ struct msm_otg_platform_data *pdata = motg->pdata;
int cnt = 0;
- unsigned temp;
+ unsigned int temp;
+ unsigned int ret;
+ u32 func_ctrl;
- if (!atomic_read(&motg->in_lpm))
+ msm_otg_dbg_log_event(phy, "LPM EXIT START", motg->inputs,
+ phy->otg->state);
+ if (!atomic_read(&motg->in_lpm)) {
+ msm_otg_dbg_log_event(phy, "USB NOT IN LPM",
+ atomic_read(&motg->in_lpm), phy->otg->state);
return 0;
-
- clk_prepare_enable(motg->pclk);
- clk_prepare_enable(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- msm_hsusb_ldo_set_mode(motg, 1);
- msm_hsusb_config_vddcx(motg, 1);
- writel(readl(addr) & ~PHY_RETEN, addr);
}
- temp = readl(USB_USBCMD);
+ disable_irq(motg->irq);
+ pm_stay_awake(&motg->pdev->dev);
+
+ /*
+ * If we are resuming from the device bus suspend, restore
+ * the max performance bus vote. Otherwise put a minimum
+ * bus vote to satisfy the requirement for enabling clocks.
+ */
+
+ if (motg->device_bus_suspend && debug_bus_voting_enabled)
+ msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+ else
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+
+ /* Vote for TCXO when waking up the phy */
+ if (motg->lpm_flags & XO_SHUTDOWN) {
+ if (motg->xo_clk)
+ clk_prepare_enable(motg->xo_clk);
+ motg->lpm_flags &= ~XO_SHUTDOWN;
+ }
+
+ if (motg->lpm_flags & CLOCKS_DOWN) {
+ if (motg->phy_csr_clk) {
+ ret = clk_prepare_enable(motg->phy_csr_clk);
+ WARN(ret, "USB phy_csr_clk enable failed\n");
+ }
+ ret = clk_prepare_enable(motg->core_clk);
+ WARN(ret, "USB core_clk enable failed\n");
+ ret = clk_prepare_enable(motg->pclk);
+ WARN(ret, "USB pclk enable failed\n");
+ motg->lpm_flags &= ~CLOCKS_DOWN;
+ }
+
+ if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
+ motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
+ } else if (motg->lpm_flags & PHY_REGULATORS_LPM) {
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_OFF);
+ motg->lpm_flags &= ~PHY_REGULATORS_LPM;
+ }
+
+ if (motg->lpm_flags & PHY_RETENTIONED ||
+ (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ msm_hsusb_config_vddcx(1);
+ ret = regulator_enable(hsusb_vdd);
+ WARN(ret, "hsusb_vdd LDO enable failed\n");
+ msm_otg_disable_phy_hv_int(motg);
+ msm_otg_exit_phy_retention(motg);
+ motg->lpm_flags &= ~PHY_RETENTIONED;
+ if (pdata->vddmin_gpio && motg->device_bus_suspend)
+ gpio_direction_input(pdata->vddmin_gpio);
+ } else if (motg->device_bus_suspend) {
+ msm_otg_disable_phy_hv_int(motg);
+ }
+
+ temp = readl_relaxed(USB_USBCMD);
temp &= ~ASYNC_INTR_CTRL;
temp &= ~ULPI_STP_CTRL;
- writel(temp, USB_USBCMD);
+ writel_relaxed(temp, USB_USBCMD);
/*
* PHY comes out of low power mode (LPM) in case of wakeup
* from asynchronous interrupt.
*/
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+ if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
goto skip_phy_resume;
- writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+ writel_relaxed(readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+
while (cnt < PHY_RESUME_TIMEOUT_USEC) {
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+ if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
break;
udelay(1);
cnt++;
@@ -808,41 +1848,235 @@
* PHY. USB state can not be restored. Re-insertion
* of USB cable is the only way to get USB working.
*/
- dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n");
+ dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n"
+ );
msm_otg_reset(phy);
}
skip_phy_resume:
- if (device_may_wakeup(phy->dev))
- disable_irq_wake(motg->irq);
+ if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
+ /* put the controller in normal mode */
+ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
+ func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
+ }
+
+ if (device_may_wakeup(phy->dev)) {
+ if (motg->host_bus_suspend || motg->device_bus_suspend) {
+ disable_irq_wake(motg->async_irq);
+ disable_irq_wake(motg->irq);
+ }
+
+ if (motg->phy_irq)
+ disable_irq_wake(motg->phy_irq);
+ if (motg->pdata->pmic_id_irq)
+ disable_irq_wake(motg->pdata->pmic_id_irq);
+ if (motg->ext_id_irq)
+ disable_irq_wake(motg->ext_id_irq);
+ if (pdata->otg_control == OTG_PHY_CONTROL &&
+ pdata->mpm_otgsessvld_int)
+ msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 0);
+ if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
+ pdata->mpm_dpshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 0);
+ if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
+ pdata->mpm_dmshv_int)
+ msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 0);
+ }
if (bus)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
atomic_set(&motg->in_lpm, 0);
if (motg->async_int) {
+ /* Match the disable_irq call from ISR */
+ enable_irq(motg->async_int);
motg->async_int = 0;
- pm_runtime_put(phy->dev);
- enable_irq(motg->irq);
+ }
+ enable_irq(motg->irq);
+
+ /* Enable ASYNC_IRQ only during LPM */
+ disable_irq(motg->async_irq);
+
+ if (motg->phy_irq_pending) {
+ motg->phy_irq_pending = false;
+ msm_id_status_w(&motg->id_status_work.work);
+ }
+
+ if (motg->host_bus_suspend) {
+ usb_hcd_resume_root_hub(hcd);
+ schedule_delayed_work(&motg->perf_vote_work,
+ msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
}
dev_info(phy->dev, "USB exited from low power mode\n");
+ msm_otg_dbg_log_event(phy, "LPM EXIT DONE",
+ motg->caps, motg->lpm_flags);
return 0;
}
#endif
-static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA)
+/*
+ * msm_otg_notify_host_mode() - record the current host/peripheral role
+ * and notify the registered USB power supply of the change. Logs an
+ * error and returns if no power supply has been registered yet.
+ */
+static void msm_otg_notify_host_mode(struct msm_otg *motg, bool host_mode)
 {
+	if (!psy) {
+		pr_err("No USB power supply registered!\n");
+		return;
+	}
+
+	motg->host_mode = host_mode;
+	power_supply_changed(psy);
+}
+
+/*
+ * msm_otg_notify_chg_type() - map the detected OTG charger type
+ * (USB_*_CHARGER) to a POWER_SUPPLY_TYPE_* value and push it to the
+ * registered USB power supply.
+ *
+ * Returns 0 on success or when the type is unchanged; -EINVAL when no
+ * power supply is registered.
+ */
+static int msm_otg_notify_chg_type(struct msm_otg *motg)
+{
+	static int charger_type;
+	union power_supply_propval pval = {0};
+
+	/*
+	 * TODO
+	 * Unify OTG driver charger types and power supply charger types
+	 */
+	/*
+	 * NOTE(review): charger_type caches the last POWER_SUPPLY_TYPE_*
+	 * value reported, yet is compared against motg->chg_type, which is
+	 * a USB_*_CHARGER value - the dedup check mixes two enum spaces.
+	 * Confirm the numeric values cannot collide.
+	 */
+	if (charger_type == motg->chg_type)
+		return 0;
+
+	if (motg->chg_type == USB_SDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB;
+	else if (motg->chg_type == USB_CDP_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
+	else if (motg->chg_type == USB_DCP_CHARGER ||
+			motg->chg_type == USB_NONCOMPLIANT_CHARGER ||
+			motg->chg_type == USB_FLOATED_CHARGER)
+		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
+	else
+		charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	if (!psy) {
+		pr_err("No USB power supply registered!\n");
+		return -EINVAL;
+	}
+
+	pr_debug("setting usb power supply type %d\n", charger_type);
+	msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
+			motg->chg_type, charger_type);
+	pval.intval = charger_type;
+	power_supply_set_property(psy, POWER_SUPPLY_PROP_TYPE, &pval);
+	return 0;
+}
+
+/*
+ * msm_otg_notify_power_supply() - program the USB power supply with the
+ * new current budget: ONLINE on/off plus CURRENT_MAX in uA (1000 * mA).
+ *
+ * Returns 0 on success, -ENXIO if no power supply is registered or a
+ * property update fails.
+ */
+static int msm_otg_notify_power_supply(struct msm_otg *motg, unsigned int mA)
+{
+	union power_supply_propval pval = {0};
+	bool enable;
+	int limit;
+
+	if (!psy) {
+		dev_dbg(motg->phy.dev, "no usb power supply registered\n");
+		goto psy_error;
+	}
+
+	if (motg->cur_power == 0 && mA > 2) {
+		/* Enable charging */
+		enable = true;
+		limit = 1000 * mA;
+	/*
+	 * NOTE(review): if cur_power is an unsigned type, "cur_power >= 0"
+	 * below is always true - confirm the intended condition.
+	 */
+	} else if (motg->cur_power >= 0 && (mA == 0 || mA == 2)) {
+		/* Disable charging */
+		enable = false;
+		/* Set max current limit in uA */
+		limit = 1000 * mA;
+	} else {
+		enable = true;
+		/* Current has changed (100/2 --> 500) */
+		limit = 1000 * mA;
+	}
+
+	pval.intval = enable;
+	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
+		goto psy_error;
+
+	pval.intval = limit;
+	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+			&pval))
+		goto psy_error;
+
+	power_supply_changed(psy);
+	return 0;
+
+psy_error:
+	dev_dbg(motg->phy.dev, "power supply error when setting property\n");
+	return -ENXIO;
+}
+
+/*
+ * msm_otg_set_online_status() - mark the USB power supply as offline
+ * (ONLINE = false). Used on disconnect; errors are logged at debug
+ * level only since this is best-effort.
+ */
+static void msm_otg_set_online_status(struct msm_otg *motg)
+{
+	union power_supply_propval pval = {0};
+
+	if (!psy) {
+		dev_dbg(motg->phy.dev, "no usb power supply registered\n");
+		return;
+	}
+
+	/* Set power supply online status to false */
+	pval.intval = false;
+	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
+		dev_dbg(motg->phy.dev, "error setting power supply property\n");
+}
+
+/*
+ * msm_otg_notify_charger() - apply a new current budget from charger
+ * detection / the gadget stack: clamp or override it based on Type-C
+ * advertised current, report the charger type to the PMIC, clear the
+ * online flag on disconnect, and finally program the power supply if
+ * the value actually changed (last value cached in motg->cur_power).
+ * No-op while acting as an A-peripheral.
+ */
+static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
+{
+	struct usb_gadget *g = motg->phy.otg->gadget;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	if (g && g->is_a_peripheral)
+		return;
+
+	dev_dbg(motg->phy.dev, "Requested curr from USB = %u, max-type-c:%u\n",
+			mA, motg->typec_current_max);
+	/* Save bc1.2 max_curr if type-c charger later moves to diff mode */
+	motg->bc1p2_current_max = mA;
+
+	/*
+	 * Limit type-c charger current to 500 for SDP charger to avoid more
+	 * current drawn than 500 with Hosts that don't support type C due to
+	 * non compliant type-c to standard A cables.
+	 */
+	if (pdata->enable_sdp_typec_current_limit &&
+			(motg->chg_type == USB_SDP_CHARGER) &&
+			motg->typec_current_max > 500)
+		motg->typec_current_max = 500;
+
+	/* Override mA if type-c charger used (use hvdcp/bc1.2 if it is 500) */
+	if (motg->typec_current_max > 500 && mA < motg->typec_current_max)
+		mA = motg->typec_current_max;
+
+	if (msm_otg_notify_chg_type(motg))
+		dev_err(motg->phy.dev,
+			"Failed notifying %d charger type to PMIC\n",
+			motg->chg_type);
+
+	/*
+	 * This condition will be true when usb cable is disconnected
+	 * during bootup before enumeration. Check charger type also
+	 * to avoid clearing online flag in case of valid charger.
+	 */
+	if (motg->online && motg->cur_power == 0 && mA == 0 &&
+			(motg->chg_type == USB_INVALID_CHARGER))
+		msm_otg_set_online_status(motg);
+
 	if (motg->cur_power == mA)
 		return;
-	/* TODO: Notify PMIC about available current */
 	dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
+	msm_otg_dbg_log_event(&motg->phy, "AVAIL CURR FROM USB",
+			mA, motg->chg_type);
+
+	msm_otg_notify_power_supply(motg, mA);
+
 	motg->cur_power = mA;
 }
-static int msm_otg_set_power(struct usb_phy *phy, unsigned mA)
+static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
{
struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
@@ -859,43 +2093,180 @@
return 0;
}
-static void msm_otg_start_host(struct usb_phy *phy, int on)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	struct usb_hcd *hcd;
+/* Defined later; the perf-vote path may power VBUS indirectly via host start. */
+static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on);
 
-	if (!phy->otg->host)
+/*
+ * msm_otg_perf_vote_update() - switch between performance and SVS
+ * operating points: PM QoS DMA latency (when configured), bus
+ * bandwidth vote and core clock rate. curr_perf_mode caches the last
+ * requested mode to skip redundant work (NOTE: static, so shared
+ * across all instances of this driver).
+ */
+static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode)
+{
+	static bool curr_perf_mode;
+	int ret, latency = motg->pm_qos_latency;
+	long clk_rate;
+
+	if (curr_perf_mode == perf_mode)
 		return;
-	hcd = bus_to_hcd(phy->otg->host);
-
-	if (on) {
-		dev_dbg(phy->dev, "host on\n");
-
-		if (pdata->vbus_power)
-			pdata->vbus_power(1);
-		/*
-		 * Some boards have a switch cotrolled by gpio
-		 * to enable/disable internal HUB. Enable internal
-		 * HUB before kicking the host.
-		 */
-		if (pdata->setup_gpio)
-			pdata->setup_gpio(OTG_STATE_A_HOST);
-#ifdef CONFIG_USB
-		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
-		device_wakeup_enable(hcd->self.controller);
-#endif
+	if (perf_mode) {
+		if (latency)
+			pm_qos_update_request(&motg->pm_qos_req_dma, latency);
+		msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+		clk_rate = motg->core_clk_rate;
 	} else {
-		dev_dbg(phy->dev, "host off\n");
+		if (latency)
+			pm_qos_update_request(&motg->pm_qos_req_dma,
+					PM_QOS_DEFAULT_VALUE);
+		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+		clk_rate = motg->core_clk_svs_rate;
+	}
 
-#ifdef CONFIG_USB
-		usb_remove_hcd(hcd);
+	if (clk_rate) {
+		ret = clk_set_rate(motg->core_clk, clk_rate);
+		if (ret)
+			dev_err(motg->phy.dev, "sys_clk set_rate fail:%d %ld\n",
+					ret, clk_rate);
+	}
+	curr_perf_mode = perf_mode;
+	pr_debug("%s: latency updated to: %d, core_freq to: %ld\n", __func__,
+			latency, clk_rate);
+}
+
+/*
+ * Periodic worker (re-arms itself every PM_QOS_SAMPLE_SEC seconds):
+ * samples and resets the USB interrupt count for the last window and
+ * enters perf mode when it meets or exceeds PM_QOS_THRESHOLD,
+ * otherwise drops back to the low-power vote.
+ */
+static void msm_otg_perf_vote_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg,
+				perf_vote_work.work);
+	unsigned int curr_sample_int_count;
+	bool in_perf_mode = false;
+
+	curr_sample_int_count = motg->usb_irq_count;
+	motg->usb_irq_count = 0;
+
+	if (curr_sample_int_count >= PM_QOS_THRESHOLD)
+		in_perf_mode = true;
+
+	msm_otg_perf_vote_update(motg, in_perf_mode);
+	pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%u\n",
+			__func__, in_perf_mode, curr_sample_int_count);
+
+	schedule_delayed_work(&motg->perf_vote_work,
+			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+}
+
+/*
+ * msm_otg_start_host() - start or stop the HCD for host mode; no-op
+ * when no host is registered. A PM runtime reference is held across
+ * the whole transition.
+ *
+ * Start: VBUS on, PHY/link block reset, optionally disable AXI
+ * prefetch, add the HCD, then add a PM QoS request and begin perf
+ * voting (initially in perf mode).
+ * Stop: VBUS off, tear down perf voting and the QoS request, remove
+ * the HCD, block reset, optionally re-enable AXI prefetch, and rewrite
+ * PORTSC to re-select the ULPI PHY (HCD removal resets all PORTSC
+ * bits, per the inline comment).
+ */
+static void msm_otg_start_host(struct usb_otg *otg, int on)
+{
+	struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	struct usb_hcd *hcd;
+	u32 val;
+
+	if (!otg->host)
+		return;
+
+	hcd = bus_to_hcd(otg->host);
+
+	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost GET",
+			get_pm_runtime_counter(motg->phy.dev), 0);
+	pm_runtime_get_sync(otg->usb_phy->dev);
+	if (on) {
+		dev_dbg(otg->usb_phy->dev, "host on\n");
+		msm_otg_dbg_log_event(&motg->phy, "HOST ON",
+				motg->inputs, otg->state);
+		msm_hsusb_vbus_power(motg, 1);
+		msm_otg_reset(&motg->phy);
+
+		if (pdata->otg_control == OTG_PHY_CONTROL)
+			ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
+				ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+
+		if (pdata->enable_axi_prefetch) {
+			val = readl_relaxed(USB_HS_APF_CTRL);
+			val &= ~APF_CTRL_EN;
+			writel_relaxed(val, USB_HS_APF_CTRL);
+		}
+		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+#ifdef CONFIG_SMP
+		motg->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+		motg->pm_qos_req_dma.irq = motg->irq;
 #endif
-		if (pdata->setup_gpio)
-			pdata->setup_gpio(OTG_STATE_UNDEFINED);
-		if (pdata->vbus_power)
-			pdata->vbus_power(0);
+		pm_qos_add_request(&motg->pm_qos_req_dma,
+				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		/* start in perf mode for better performance initially */
+		msm_otg_perf_vote_update(motg, true);
+		schedule_delayed_work(&motg->perf_vote_work,
+			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
+	} else {
+		dev_dbg(otg->usb_phy->dev, "host off\n");
+		msm_otg_dbg_log_event(&motg->phy, "HOST OFF",
+				motg->inputs, otg->state);
+		msm_hsusb_vbus_power(motg, 0);
+
+		cancel_delayed_work_sync(&motg->perf_vote_work);
+		msm_otg_perf_vote_update(motg, false);
+		pm_qos_remove_request(&motg->pm_qos_req_dma);
+
+		pm_runtime_disable(&hcd->self.root_hub->dev);
+		pm_runtime_barrier(&hcd->self.root_hub->dev);
+		usb_remove_hcd(hcd);
+		msm_otg_reset(&motg->phy);
+
+		if (pdata->enable_axi_prefetch)
+			writel_relaxed(readl_relaxed(USB_HS_APF_CTRL)
+					| (APF_CTRL_EN), USB_HS_APF_CTRL);
+
+		/* HCD core reset all bits of PORTSC. select ULPI phy */
+		writel_relaxed(0x80000000, USB_PORTSC);
+
+		if (pdata->otg_control == OTG_PHY_CONTROL)
+			ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
+				ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
+	}
+	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost PUT",
+			get_pm_runtime_counter(motg->phy.dev), 0);
+
+	pm_runtime_mark_last_busy(otg->usb_phy->dev);
+	pm_runtime_put_autosuspend(otg->usb_phy->dev);
+}
+
+/*
+ * msm_hsusb_vbus_power() - drive VBUS for host mode, preferring the
+ * platform vbus_power() hook when provided, otherwise the vbus_otg
+ * regulator. The static vbus_is_on tracks the current state so
+ * repeated calls with the same value are no-ops. Ordering matters:
+ * the charger is told about host mode before the boost is enabled,
+ * and the boost is disabled before current draw is re-enabled (see
+ * inline comment).
+ */
+static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on)
+{
+	int ret;
+	static bool vbus_is_on;
+
+	msm_otg_dbg_log_event(&motg->phy, "VBUS POWER", on, vbus_is_on);
+	if (vbus_is_on == on)
+		return;
+
+	if (motg->pdata->vbus_power) {
+		ret = motg->pdata->vbus_power(on);
+		if (!ret)
+			vbus_is_on = on;
+		return;
+	}
+
+	if (!vbus_otg) {
+		pr_err("vbus_otg is NULL.");
+		return;
+	}
+
+	/*
+	 * if entering host mode tell the charger to not draw any current
+	 * from usb before turning on the boost.
+	 * if exiting host mode disable the boost before enabling to draw
+	 * current from the source.
+	 */
+	if (on) {
+		msm_otg_notify_host_mode(motg, on);
+		ret = regulator_enable(vbus_otg);
+		if (ret) {
+			pr_err("unable to enable vbus_otg\n");
+			return;
+		}
+		vbus_is_on = true;
+	} else {
+		ret = regulator_disable(vbus_otg);
+		if (ret) {
+			pr_err("unable to disable vbus_otg\n");
+			return;
+		}
+		msm_otg_notify_host_mode(motg, on);
+		vbus_is_on = false;
 	}
 }
@@ -908,18 +2279,28 @@
* Fail host registration if this board can support
* only peripheral configuration.
*/
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) {
+ if (motg->pdata->mode == USB_PERIPHERAL) {
dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
return -ENODEV;
}
+ if (!motg->pdata->vbus_power && host) {
+ vbus_otg = devm_regulator_get(motg->phy.dev, "vbus_otg");
+ if (IS_ERR(vbus_otg)) {
+ msm_otg_dbg_log_event(&motg->phy,
+ "UNABLE TO GET VBUS_OTG",
+ otg->state, 0);
+ pr_err("Unable to get vbus_otg\n");
+ return PTR_ERR(vbus_otg);
+ }
+ }
+
if (!host) {
if (otg->state == OTG_STATE_A_HOST) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_host(otg->usb_phy, 0);
+ msm_otg_start_host(otg, 0);
otg->host = NULL;
otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
+ queue_work(motg->otg_wq, &motg->sm_work);
} else {
otg->host = NULL;
}
@@ -932,38 +2313,97 @@
otg->host = host;
dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
+ msm_otg_dbg_log_event(&motg->phy, "HOST DRIVER REGISTERED",
+ hcd->power_budget, motg->pdata->mode);
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
+ /*
+ * Kick the state machine work, if peripheral is not supported
+ * or peripheral is already registered with us.
+ */
+ if (motg->pdata->mode == USB_HOST || otg->gadget)
+ queue_work(motg->otg_wq, &motg->sm_work);
return 0;
}
-static void msm_otg_start_peripheral(struct usb_phy *phy, int on)
+static void msm_otg_start_peripheral(struct usb_otg *otg, int on)
{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
struct msm_otg_platform_data *pdata = motg->pdata;
+ struct pinctrl_state *set_state;
+ int ret;
- if (!phy->otg->gadget)
+ if (!otg->gadget)
return;
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri GET",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_get_sync(otg->usb_phy->dev);
if (on) {
- dev_dbg(phy->dev, "gadget on\n");
- /*
- * Some boards have a switch cotrolled by gpio
- * to enable/disable internal HUB. Disable internal
- * HUB before kicking the gadget.
- */
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
- usb_gadget_vbus_connect(phy->otg->gadget);
- } else {
- dev_dbg(phy->dev, "gadget off\n");
- usb_gadget_vbus_disconnect(phy->otg->gadget);
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_UNDEFINED);
- }
+ dev_dbg(otg->usb_phy->dev, "gadget on\n");
+ msm_otg_dbg_log_event(&motg->phy, "GADGET ON",
+ motg->inputs, otg->state);
+ /* Configure BUS performance parameters for MAX bandwidth */
+ if (debug_bus_voting_enabled)
+ msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
+ /* bump up usb core_clk to default */
+ clk_set_rate(motg->core_clk, motg->core_clk_rate);
+
+ usb_gadget_vbus_connect(otg->gadget);
+
+ /*
+	 * Request VDD min gpio, if needed to support VDD
+	 * minimization during peripheral bus suspend.
+ */
+ if (pdata->vddmin_gpio) {
+ if (motg->phy_pinctrl) {
+ set_state =
+ pinctrl_lookup_state(motg->phy_pinctrl,
+ "hsusb_active");
+ if (IS_ERR(set_state)) {
+ pr_err("cannot get phy pinctrl active state\n");
+ } else {
+ pinctrl_select_state(motg->phy_pinctrl,
+ set_state);
+ }
+ }
+
+ ret = gpio_request(pdata->vddmin_gpio,
+ "MSM_OTG_VDD_MIN_GPIO");
+ if (ret < 0) {
+ dev_err(otg->usb_phy->dev, "gpio req failed for vdd min:%d\n",
+ ret);
+ pdata->vddmin_gpio = 0;
+ }
+ }
+ } else {
+ dev_dbg(otg->usb_phy->dev, "gadget off\n");
+ msm_otg_dbg_log_event(&motg->phy, "GADGET OFF",
+ motg->inputs, otg->state);
+ usb_gadget_vbus_disconnect(otg->gadget);
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ /* Configure BUS performance parameters to default */
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+
+ if (pdata->vddmin_gpio) {
+ gpio_free(pdata->vddmin_gpio);
+ if (motg->phy_pinctrl) {
+ set_state =
+ pinctrl_lookup_state(motg->phy_pinctrl,
+ "hsusb_sleep");
+ if (IS_ERR(set_state))
+ pr_err("cannot get phy pinctrl sleep state\n");
+ else
+ pinctrl_select_state(motg->phy_pinctrl,
+ set_state);
+ }
+ }
+ }
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri PUT",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_mark_last_busy(otg->usb_phy->dev);
+ pm_runtime_put_autosuspend(otg->usb_phy->dev);
}
static int msm_otg_set_peripheral(struct usb_otg *otg,
@@ -975,18 +2415,20 @@
* Fail peripheral registration if this board can support
* only host configuration.
*/
- if (motg->pdata->mode == USB_DR_MODE_HOST) {
+ if (motg->pdata->mode == USB_HOST) {
dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
return -ENODEV;
}
if (!gadget) {
if (otg->state == OTG_STATE_B_PERIPHERAL) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_peripheral(otg->usb_phy, 0);
+ msm_otg_dbg_log_event(&motg->phy,
+ "PM RUNTIME: PERIPHERAL GET1",
+ get_pm_runtime_counter(otg->usb_phy->dev), 0);
+ msm_otg_start_peripheral(otg, 0);
otg->gadget = NULL;
otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
+ queue_work(motg->otg_wq, &motg->sm_work);
} else {
otg->gadget = NULL;
}
@@ -994,15 +2436,92 @@
return 0;
}
otg->gadget = gadget;
- dev_dbg(otg->usb_phy->dev,
- "peripheral driver registered w/ tranceiver\n");
+ dev_dbg(otg->usb_phy->dev, "peripheral driver registered w/ tranceiver\n");
+ msm_otg_dbg_log_event(&motg->phy, "PERIPHERAL DRIVER REGISTERED",
+ otg->state, motg->pdata->mode);
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
+ /*
+ * Kick the state machine work, if host is not supported
+ * or host is already registered with us.
+ */
+ if (motg->pdata->mode == USB_PERIPHERAL || otg->host)
+ queue_work(motg->otg_wq, &motg->sm_work);
return 0;
}
+static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
+{
+	unsigned long flags;
+	bool id;
+	int ret;
+	/* No PMIC ID irq wired up: report ID as floated (safe default) */
+	if (!motg->pdata->pmic_id_irq)
+		return true;
+
+	local_irq_save(flags);
+	ret = irq_get_irqchip_state(motg->pdata->pmic_id_irq,
+				IRQCHIP_STATE_LINE_LEVEL, &id);
+	local_irq_restore(flags);
+
+	/*
+	 * If we cannot read ID line state for some reason, treat
+	 * it as float. This would prevent MHL discovery and kicking
+	 * host mode unnecessarily.
+	 */
+	if (ret < 0)
+		return true;
+
+	return !!id;
+}
+
+static bool msm_otg_read_phy_id_state(struct msm_otg *motg)
+{
+ u8 val;
+
+ /*
+ * clear the pending/outstanding interrupts and
+ * read the ID status from the SRC_STATUS register.
+ */
+ writeb_relaxed(USB_PHY_ID_MASK, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
+
+ writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
+ /*
+ * Databook says 200 usec delay is required for
+ * clearing the interrupts.
+ */
+ udelay(200);
+ writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
+
+ val = readb_relaxed(USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS);
+ if (val & USB_PHY_IDDIG_1_0)
+ return false; /* ID is grounded */
+ else
+ return true;
+}
+
+static void msm_otg_chg_check_timer_func(unsigned long data)
+{
+ struct msm_otg *motg = (struct msm_otg *) data;
+ struct usb_otg *otg = motg->phy.otg;
+
+ if (atomic_read(&motg->in_lpm) ||
+ !test_bit(B_SESS_VLD, &motg->inputs) ||
+ otg->state != OTG_STATE_B_PERIPHERAL ||
+ otg->gadget->speed != USB_SPEED_UNKNOWN) {
+ dev_dbg(otg->usb_phy->dev, "Nothing to do in chg_check_timer\n");
+ return;
+ }
+
+ if ((readl_relaxed(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
+ dev_dbg(otg->usb_phy->dev, "DCP is detected as SDP\n");
+ msm_otg_dbg_log_event(&motg->phy, "DCP IS DETECTED AS SDP",
+ otg->state, 0);
+ set_bit(B_FALSE_SDP, &motg->inputs);
+ queue_work(motg->otg_wq, &motg->sm_work);
+ }
+}
+
static bool msm_chg_check_secondary_det(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1010,11 +2529,8 @@
bool ret = false;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
chg_det = ulpi_read(phy, 0x87);
ret = chg_det & 1;
break;
@@ -1027,30 +2543,10 @@
static void msm_chg_enable_secondary_det(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 chg_det;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* put it in host mode for enabling D- source */
- chg_det &= ~(1 << 2);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
/*
* Configure DM as current source, DP as current sink
* and enable battery charging comparators.
@@ -1071,13 +2567,13 @@
bool ret = false;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
chg_det = ulpi_read(phy, 0x87);
ret = chg_det & 1;
+ /* Turn off VDP_SRC */
+ ulpi_write(phy, 0x3, 0x86);
+ msleep(20);
break;
default:
break;
@@ -1088,16 +2584,10 @@
static void msm_chg_enable_primary_det(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 chg_det;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
/*
* Configure DP as current source, DM as current sink
* and enable battery charging comparators.
@@ -1117,11 +2607,8 @@
bool ret = false;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- line_state = ulpi_read(phy, 0x15);
- ret = !(line_state & 1);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
line_state = ulpi_read(phy, 0x87);
ret = line_state & 2;
break;
@@ -1134,17 +2621,19 @@
static void msm_chg_disable_dcd(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 chg_det;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- chg_det &= ~(1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
ulpi_write(phy, 0x10, 0x86);
break;
+ case SNPS_FEMTO_PHY:
+ ulpi_write(phy, 0x10, 0x86);
+ /*
+ * Disable the Rdm_down after
+ * the DCD is completed.
+ */
+ ulpi_write(phy, 0x04, 0x0C);
+ break;
default:
break;
}
@@ -1153,19 +2642,26 @@
static void msm_chg_enable_dcd(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 chg_det;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn on D+ current source */
- chg_det |= (1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
/* Data contact detection enable */
ulpi_write(phy, 0x10, 0x85);
break;
+ case SNPS_FEMTO_PHY:
+ /*
+ * Idp_src and Rdm_down are de-coupled
+ * on Femto PHY. If Idp_src alone is
+ * enabled, DCD timeout is observed with
+ * wall charger. But a genuine DCD timeout
+ * may be incorrectly interpreted. Also
+ * BC1.2 compliance testers expect Rdm_down
+	 * to be enabled during DCD. Enable Rdm_down
+ * explicitly before enabling the DCD.
+ */
+ ulpi_write(phy, 0x04, 0x0B);
+ ulpi_write(phy, 0x10, 0x85);
+ break;
default:
break;
}
@@ -1174,7 +2670,7 @@
static void msm_chg_block_on(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
+ u32 func_ctrl;
/* put the controller in non-driving mode */
func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
@@ -1183,19 +2679,12 @@
ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
+ /* disable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xC);
/* Clear charger detecting control bits */
- ulpi_write(phy, 0x3F, 0x86);
+ ulpi_write(phy, 0x1F, 0x86);
/* Clear alt interrupt latch and enable bits */
ulpi_write(phy, 0x1F, 0x92);
ulpi_write(phy, 0x1F, 0x95);
@@ -1209,21 +2698,18 @@
static void msm_chg_block_off(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
+ u32 func_ctrl;
switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
+ case SNPS_PICO_PHY:
+ case SNPS_FEMTO_PHY:
/* Clear charger detecting control bits */
ulpi_write(phy, 0x3F, 0x86);
/* Clear alt interrupt latch and enable bits */
ulpi_write(phy, 0x1F, 0x92);
ulpi_write(phy, 0x1F, 0x95);
+ /* re-enable DP and DM pull down resistors */
+ ulpi_write(phy, 0x6, 0xB);
break;
default:
break;
@@ -1236,31 +2722,53 @@
ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
}
-#define MSM_CHG_DCD_POLL_TIME (100 * HZ/1000) /* 100 msec */
-#define MSM_CHG_DCD_MAX_RETRIES 6 /* Tdcd_tmout = 6 * 100 msec */
-#define MSM_CHG_PRIMARY_DET_TIME (40 * HZ/1000) /* TVDPSRC_ON */
-#define MSM_CHG_SECONDARY_DET_TIME (40 * HZ/1000) /* TVDMSRC_ON */
+static const char *chg_to_string(enum usb_chg_type chg_type)
+{
+ switch (chg_type) {
+ case USB_SDP_CHARGER: return "USB_SDP_CHARGER";
+ case USB_DCP_CHARGER: return "USB_DCP_CHARGER";
+ case USB_CDP_CHARGER: return "USB_CDP_CHARGER";
+ case USB_NONCOMPLIANT_CHARGER: return "USB_NONCOMPLIANT_CHARGER";
+ case USB_FLOATED_CHARGER: return "USB_FLOATED_CHARGER";
+ default: return "INVALID_CHARGER";
+ }
+}
+
+#define MSM_CHG_DCD_TIMEOUT (750 * HZ/1000) /* 750 msec */
+#define MSM_CHG_DCD_POLL_TIME (50 * HZ/1000) /* 50 msec */
+#define MSM_CHG_PRIMARY_DET_TIME (50 * HZ/1000) /* TVDPSRC_ON */
+#define MSM_CHG_SECONDARY_DET_TIME (50 * HZ/1000) /* TVDMSRC_ON */
static void msm_chg_detect_work(struct work_struct *w)
{
struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
struct usb_phy *phy = &motg->phy;
- bool is_dcd, tmout, vout;
+ bool is_dcd = false, tmout, vout;
+ static bool dcd;
+ u32 line_state, dm_vlgc;
unsigned long delay;
dev_dbg(phy->dev, "chg detection work\n");
+ msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
+ motg->chg_state, get_pm_runtime_counter(phy->dev));
+
switch (motg->chg_state) {
case USB_CHG_STATE_UNDEFINED:
- pm_runtime_get_sync(phy->dev);
+ case USB_CHG_STATE_IN_PROGRESS:
msm_chg_block_on(motg);
msm_chg_enable_dcd(motg);
motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
- motg->dcd_retries = 0;
+ motg->dcd_time = 0;
delay = MSM_CHG_DCD_POLL_TIME;
break;
case USB_CHG_STATE_WAIT_FOR_DCD:
is_dcd = msm_chg_check_dcd(motg);
- tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
+ motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
+ tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
if (is_dcd || tmout) {
+ if (is_dcd)
+ dcd = true;
+ else
+ dcd = false;
msm_chg_disable_dcd(motg);
msm_chg_enable_primary_det(motg);
delay = MSM_CHG_PRIMARY_DET_TIME;
@@ -1271,14 +2779,29 @@
break;
case USB_CHG_STATE_DCD_DONE:
vout = msm_chg_check_primary_det(motg);
- if (vout) {
- msm_chg_enable_secondary_det(motg);
- delay = MSM_CHG_SECONDARY_DET_TIME;
- motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
- } else {
- motg->chg_type = USB_SDP_CHARGER;
+ line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
+ dm_vlgc = line_state & PORTSC_LS_DM;
+ if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
+ if (line_state) { /* DP > VLGC */
+ motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ delay = 0;
+ } else {
+ msm_chg_enable_secondary_det(motg);
+ delay = MSM_CHG_SECONDARY_DET_TIME;
+ motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+ }
+ } else { /* DM < VDAT_REF || DM > VLGC */
+ if (line_state) /* DP > VLGC or/and DM > VLGC */
+ motg->chg_type = USB_NONCOMPLIANT_CHARGER;
+ else if (!dcd && floated_charger_enable)
+ motg->chg_type = USB_FLOATED_CHARGER;
+ else
+ motg->chg_type = USB_SDP_CHARGER;
+
motg->chg_state = USB_CHG_STATE_DETECTED;
delay = 0;
+ goto state_detected;
}
break;
case USB_CHG_STATE_PRIMARY_DONE:
@@ -1292,17 +2815,42 @@
case USB_CHG_STATE_SECONDARY_DONE:
motg->chg_state = USB_CHG_STATE_DETECTED;
case USB_CHG_STATE_DETECTED:
+state_detected:
+ /*
+ * Notify the charger type to power supply
+ * owner as soon as we determine the charger.
+ */
+ if (motg->chg_type == USB_DCP_CHARGER && motg->ext_chg_opened) {
+ init_completion(&motg->ext_chg_wait);
+ motg->ext_chg_active = DEFAULT;
+ }
+ msm_otg_notify_chg_type(motg);
msm_chg_block_off(motg);
- dev_dbg(phy->dev, "charger = %d\n", motg->chg_type);
- schedule_work(&motg->sm_work);
+
+ /* Enable VDP_SRC in case of DCP charger */
+ if (motg->chg_type == USB_DCP_CHARGER)
+ ulpi_write(phy, 0x2, 0x85);
+
+ dev_dbg(phy->dev, "chg_type = %s\n",
+ chg_to_string(motg->chg_type));
+ msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
+ motg->chg_type, get_pm_runtime_counter(phy->dev));
+ /* to match _get from sm_work before starting chg_det_work */
+ pm_runtime_mark_last_busy(phy->dev);
+ pm_runtime_put_autosuspend(phy->dev);
+
+ queue_work(motg->otg_wq, &motg->sm_work);
return;
default:
return;
}
- schedule_delayed_work(&motg->chg_work, delay);
+ msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
+ queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
}
+#define VBUS_INIT_TIMEOUT msecs_to_jiffies(5000)
+
/*
* We support OTG, Peripheral only and Host only configurations. In case
* of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
@@ -1313,84 +2861,241 @@
static void msm_otg_init_sm(struct msm_otg *motg)
{
struct msm_otg_platform_data *pdata = motg->pdata;
- u32 otgsc = readl(USB_OTGSC);
+ u32 otgsc = readl_relaxed(USB_OTGSC);
+ int ret;
switch (pdata->mode) {
- case USB_DR_MODE_OTG:
- if (pdata->otg_control == OTG_PHY_CONTROL) {
+ case USB_OTG:
+ if (pdata->otg_control == OTG_USER_CONTROL) {
+ if (pdata->default_mode == USB_HOST) {
+ clear_bit(ID, &motg->inputs);
+ } else if (pdata->default_mode == USB_PERIPHERAL) {
+ set_bit(ID, &motg->inputs);
+ set_bit(B_SESS_VLD, &motg->inputs);
+ } else {
+ set_bit(ID, &motg->inputs);
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ }
+ } else if (pdata->otg_control == OTG_PHY_CONTROL) {
if (otgsc & OTGSC_ID)
set_bit(ID, &motg->inputs);
else
clear_bit(ID, &motg->inputs);
-
if (otgsc & OTGSC_BSV)
set_bit(B_SESS_VLD, &motg->inputs);
else
clear_bit(B_SESS_VLD, &motg->inputs);
- } else if (pdata->otg_control == OTG_USER_CONTROL) {
- set_bit(ID, &motg->inputs);
+ } else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+ if (pdata->pmic_id_irq) {
+ if (msm_otg_read_pmic_id_state(motg))
+ set_bit(ID, &motg->inputs);
+ else
+ clear_bit(ID, &motg->inputs);
+ } else if (motg->ext_id_irq) {
+ if (gpio_get_value(pdata->usb_id_gpio))
+ set_bit(ID, &motg->inputs);
+ else
+ clear_bit(ID, &motg->inputs);
+ } else if (motg->phy_irq) {
+ if (msm_otg_read_phy_id_state(motg))
+ set_bit(ID, &motg->inputs);
+ else
+ clear_bit(ID, &motg->inputs);
+ }
+ /*
+ * VBUS initial state is reported after PMIC
+ * driver initialization. Wait for it.
+ */
+ ret = wait_for_completion_timeout(&pmic_vbus_init,
+ VBUS_INIT_TIMEOUT);
+ if (!ret) {
+ dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
+ __func__);
+ msm_otg_dbg_log_event(&motg->phy,
+ "PMIC VBUS WAIT TMOUT", motg->inputs,
+ motg->phy.otg->state);
clear_bit(B_SESS_VLD, &motg->inputs);
+ pmic_vbus_init.done = 1;
+ }
}
break;
- case USB_DR_MODE_HOST:
+ case USB_HOST:
clear_bit(ID, &motg->inputs);
break;
- case USB_DR_MODE_PERIPHERAL:
+ case USB_PERIPHERAL:
set_bit(ID, &motg->inputs);
- if (otgsc & OTGSC_BSV)
+ if (pdata->otg_control == OTG_PHY_CONTROL) {
+ if (otgsc & OTGSC_BSV)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ } else if (pdata->otg_control == OTG_PMIC_CONTROL) {
+ /*
+ * VBUS initial state is reported after PMIC
+ * driver initialization. Wait for it.
+ */
+ ret = wait_for_completion_timeout(&pmic_vbus_init,
+ VBUS_INIT_TIMEOUT);
+ if (!ret) {
+ dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
+ __func__);
+ msm_otg_dbg_log_event(&motg->phy,
+ "PMIC VBUS WAIT TMOUT", motg->inputs,
+ motg->phy.otg->state);
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ pmic_vbus_init.done = 1;
+ }
+ } else if (pdata->otg_control == OTG_USER_CONTROL) {
+ set_bit(ID, &motg->inputs);
set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
+ }
break;
default:
break;
}
+ msm_otg_dbg_log_event(&motg->phy, "SM INIT", pdata->mode, motg->inputs);
+ if (motg->id_state != USB_ID_GROUND)
+ motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
+ USB_ID_GROUND;
+}
+
+static void msm_otg_wait_for_ext_chg_done(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ unsigned long t;
+
+ /*
+ * Defer next cable connect event till external charger
+ * detection is completed.
+ */
+
+ if (motg->ext_chg_active == ACTIVE) {
+
+do_wait:
+ pr_debug("before msm_otg ext chg wait\n");
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: WAIT", 0, 0);
+
+ t = wait_for_completion_timeout(&motg->ext_chg_wait,
+ msecs_to_jiffies(3000));
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: DONE", t, 0);
+
+ if (!t)
+ pr_err("msm_otg ext chg wait timeout\n");
+ else if (motg->ext_chg_active == ACTIVE)
+ goto do_wait;
+ else
+ pr_debug("msm_otg ext chg wait done\n");
+ }
+
+ if (motg->ext_chg_opened) {
+ if (phy->flags & ENABLE_DP_MANUAL_PULLUP) {
+ ulpi_write(phy, ULPI_MISC_A_VBUSVLDEXT |
+ ULPI_MISC_A_VBUSVLDEXTSEL,
+ ULPI_CLR(ULPI_MISC_A));
+ }
+ /* clear charging register bits */
+ ulpi_write(phy, 0x3F, 0x86);
+ /* re-enable DP and DM pull-down resistors*/
+ ulpi_write(phy, 0x6, 0xB);
+ }
}
static void msm_otg_sm_work(struct work_struct *w)
{
struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
struct usb_otg *otg = motg->phy.otg;
+ struct device *dev = otg->usb_phy->dev;
+ bool work = 0, dcp;
+ int ret;
+
+ pr_debug("%s work\n", usb_otg_state_string(otg->state));
+ msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
+ otg->state, motg->inputs);
+
+ /* Just resume h/w if reqd, pm_count is handled based on state/inputs */
+ if (motg->resume_pending) {
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ if (atomic_read(&motg->in_lpm)) {
+ dev_err(dev, "SM WORK: USB is in LPM\n");
+ msm_otg_dbg_log_event(&motg->phy,
+ "SM WORK: USB IS IN LPM",
+ otg->state, motg->inputs);
+ msm_otg_resume(motg);
+ }
+ motg->resume_pending = false;
+ pm_runtime_put_noidle(otg->usb_phy->dev);
+ }
switch (otg->state) {
case OTG_STATE_UNDEFINED:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_UNDEFINED state\n");
+ pm_runtime_get_sync(otg->usb_phy->dev);
msm_otg_reset(otg->usb_phy);
+ /* Add child device only after block reset */
+ ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
+ &motg->pdev->dev);
+ if (ret)
+ dev_dbg(&motg->pdev->dev, "failed to add BAM core\n");
+
msm_otg_init_sm(motg);
otg->state = OTG_STATE_B_IDLE;
+ if (!test_bit(B_SESS_VLD, &motg->inputs) &&
+ test_bit(ID, &motg->inputs)) {
+ msm_otg_dbg_log_event(&motg->phy,
+ "PM RUNTIME: UNDEF PUT",
+ get_pm_runtime_counter(otg->usb_phy->dev), 0);
+ pm_runtime_put_sync(otg->usb_phy->dev);
+ break;
+ }
+ pm_runtime_put(otg->usb_phy->dev);
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_IDLE state\n");
if (!test_bit(ID, &motg->inputs) && otg->host) {
- /* disable BSV bit */
- writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
- msm_otg_start_host(otg->usb_phy, 1);
+ pr_debug("!id\n");
+ msm_otg_dbg_log_event(&motg->phy, "!ID",
+ motg->inputs, otg->state);
+
+ msm_otg_start_host(otg, 1);
otg->state = OTG_STATE_A_HOST;
} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ pr_debug("b_sess_vld\n");
+ msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
+ motg->inputs, otg->state);
switch (motg->chg_state) {
case USB_CHG_STATE_UNDEFINED:
+ /* put at the end of chg_det or disconnect */
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_dbg_log_event(&motg->phy, "PM CHG GET",
+ get_pm_runtime_counter(dev), 0);
+ motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
msm_chg_detect_work(&motg->chg_work.work);
break;
case USB_CHG_STATE_DETECTED:
switch (motg->chg_type) {
case USB_DCP_CHARGER:
+ /* fall through */
+ case USB_NONCOMPLIANT_CHARGER:
+ msm_otg_notify_charger(motg,
+ dcp_max_current);
+ if (!motg->is_ext_chg_dcp)
+ otg->state =
+ OTG_STATE_B_CHARGER;
+ break;
+ case USB_FLOATED_CHARGER:
msm_otg_notify_charger(motg,
IDEV_CHG_MAX);
+ otg->state = OTG_STATE_B_CHARGER;
break;
case USB_CDP_CHARGER:
msm_otg_notify_charger(motg,
IDEV_CHG_MAX);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
- break;
+ /* fall through */
case USB_SDP_CHARGER:
- msm_otg_notify_charger(motg, IUNIT);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_start_peripheral(otg, 1);
+ otg->state =
+ OTG_STATE_B_PERIPHERAL;
+ mod_timer(&motg->chg_check_timer,
+ CHG_RECHECK_DELAY);
break;
default:
break;
@@ -1400,88 +3105,354 @@
break;
}
} else {
+ pr_debug("chg_work cancel");
+ msm_otg_dbg_log_event(&motg->phy, "CHG_WORK CANCEL",
+ motg->inputs, otg->state);
+ del_timer_sync(&motg->chg_check_timer);
+ clear_bit(B_FALSE_SDP, &motg->inputs);
+ cancel_delayed_work_sync(&motg->chg_work);
/*
- * If charger detection work is pending, decrement
- * the pm usage counter to balance with the one that
- * is incremented in charger detection work.
+ * Find out whether chg_w couldn't start or finished.
+ * In both the cases, runtime ref_count vote is missing
*/
- if (cancel_delayed_work_sync(&motg->chg_work)) {
- pm_runtime_put_sync(otg->usb_phy->dev);
- msm_otg_reset(otg->usb_phy);
+ if (motg->chg_state == USB_CHG_STATE_UNDEFINED ||
+ motg->chg_state == USB_CHG_STATE_DETECTED) {
+ msm_otg_dbg_log_event(&motg->phy, "RT !CHG GET",
+ get_pm_runtime_counter(otg->usb_phy->dev), 0);
+ pm_runtime_get_sync(dev);
}
- msm_otg_notify_charger(motg, 0);
+
+ dcp = (motg->chg_type == USB_DCP_CHARGER);
motg->chg_state = USB_CHG_STATE_UNDEFINED;
motg->chg_type = USB_INVALID_CHARGER;
+ msm_otg_notify_charger(motg, 0);
+ if (dcp) {
+ if (motg->ext_chg_active == DEFAULT)
+ motg->ext_chg_active = INACTIVE;
+ msm_otg_wait_for_ext_chg_done(motg);
+ /* Turn off VDP_SRC */
+ ulpi_write(otg->usb_phy, 0x2, 0x86);
+ }
+ msm_chg_block_off(motg);
+ msm_otg_dbg_log_event(&motg->phy, "RT: CHG A PUT",
+ get_pm_runtime_counter(otg->usb_phy->dev), 0);
+ /* Delay used only if autosuspend enabled */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
-
- if (otg->state == OTG_STATE_B_IDLE)
- pm_runtime_put_sync(otg->usb_phy->dev);
break;
case OTG_STATE_B_PERIPHERAL:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
- if (!test_bit(B_SESS_VLD, &motg->inputs) ||
- !test_bit(ID, &motg->inputs)) {
- msm_otg_notify_charger(motg, 0);
- msm_otg_start_peripheral(otg->usb_phy, 0);
- motg->chg_state = USB_CHG_STATE_UNDEFINED;
- motg->chg_type = USB_INVALID_CHARGER;
+ if (test_bit(B_SESS_VLD, &motg->inputs) &&
+ test_bit(B_FALSE_SDP, &motg->inputs)) {
+ pr_debug("B_FALSE_SDP\n");
+ msm_otg_start_peripheral(otg, 0);
+ motg->chg_type = USB_DCP_CHARGER;
+ clear_bit(B_FALSE_SDP, &motg->inputs);
otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
+ msm_otg_dbg_log_event(&motg->phy, "B_FALSE_SDP PUT",
+ get_pm_runtime_counter(dev), motg->inputs);
+ pm_runtime_put_sync(dev);
+ /* schedule work to update charging current */
+ work = 1;
+ } else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+ msm_otg_start_peripheral(otg, 0);
+ msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
+ get_pm_runtime_counter(dev), 0);
+ /* _put for _get done on cable connect in B_IDLE */
+ pm_runtime_put_noidle(dev);
+ /* Schedule work to finish cable disconnect processing*/
+ otg->state = OTG_STATE_B_IDLE;
+ work = 1;
+ } else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
+ pr_debug("a_bus_suspend\n");
+ msm_otg_dbg_log_event(&motg->phy,
+ "BUS_SUSPEND: PM RT PUT",
+ get_pm_runtime_counter(dev), 0);
+ otg->state = OTG_STATE_B_SUSPEND;
+ /* _get on connect in B_IDLE or host resume in B_SUSP */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+ break;
+ case OTG_STATE_B_SUSPEND:
+ if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+ msm_otg_start_peripheral(otg, 0);
+ otg->state = OTG_STATE_B_IDLE;
+ /* Schedule work to finish cable disconnect processing*/
+ work = 1;
+ } else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
+ pr_debug("!a_bus_suspend\n");
+ otg->state = OTG_STATE_B_PERIPHERAL;
+ msm_otg_dbg_log_event(&motg->phy,
+ "BUS_RESUME: PM RT GET",
+ get_pm_runtime_counter(dev), 0);
+ pm_runtime_get_sync(dev);
+ }
+ break;
+
+ case OTG_STATE_B_CHARGER:
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ pr_debug("BSV set again\n");
+ msm_otg_dbg_log_event(&motg->phy, "BSV SET AGAIN",
+ motg->inputs, otg->state);
+ } else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
+ otg->state = OTG_STATE_B_IDLE;
+ work = 1;
}
break;
case OTG_STATE_A_HOST:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_A_HOST state\n");
if (test_bit(ID, &motg->inputs)) {
- msm_otg_start_host(otg->usb_phy, 0);
+ msm_otg_start_host(otg, 0);
otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
+ work = 1;
}
break;
default:
break;
}
+
+ if (work)
+ queue_work(motg->otg_wq, &motg->sm_work);
}
static irqreturn_t msm_otg_irq(int irq, void *data)
{
struct msm_otg *motg = data;
- struct usb_phy *phy = &motg->phy;
+ struct usb_otg *otg = motg->phy.otg;
u32 otgsc = 0;
+ bool work = 0;
if (atomic_read(&motg->in_lpm)) {
+ pr_debug("OTG IRQ: %d in LPM\n", irq);
+ msm_otg_dbg_log_event(&motg->phy, "OTG IRQ IS IN LPM",
+ irq, otg->state);
+		/* Ignore the interrupt if one was already seen in LPM */
+ if (motg->async_int)
+ return IRQ_HANDLED;
+
disable_irq_nosync(irq);
- motg->async_int = 1;
- pm_runtime_get(phy->dev);
+ motg->async_int = irq;
+ msm_otg_kick_sm_work(motg);
+
return IRQ_HANDLED;
}
+ motg->usb_irq_count++;
- otgsc = readl(USB_OTGSC);
+ otgsc = readl_relaxed(USB_OTGSC);
if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
return IRQ_NONE;
if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
- if (otgsc & OTGSC_ID)
+ if (otgsc & OTGSC_ID) {
+ dev_dbg(otg->usb_phy->dev, "ID set\n");
+ msm_otg_dbg_log_event(&motg->phy, "ID SET",
+ motg->inputs, otg->state);
set_bit(ID, &motg->inputs);
- else
+ } else {
+ dev_dbg(otg->usb_phy->dev, "ID clear\n");
+ msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
+ motg->inputs, otg->state);
clear_bit(ID, &motg->inputs);
- dev_dbg(phy->dev, "ID set/clear\n");
- pm_runtime_get_noresume(phy->dev);
- } else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
- if (otgsc & OTGSC_BSV)
+ }
+ work = 1;
+ } else if ((otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
+ if (otgsc & OTGSC_BSV) {
+ dev_dbg(otg->usb_phy->dev, "BSV set\n");
+ msm_otg_dbg_log_event(&motg->phy, "BSV SET",
+ motg->inputs, otg->state);
set_bit(B_SESS_VLD, &motg->inputs);
- else
+ } else {
+ dev_dbg(otg->usb_phy->dev, "BSV clear\n");
+ msm_otg_dbg_log_event(&motg->phy, "BSV CLEAR",
+ motg->inputs, otg->state);
clear_bit(B_SESS_VLD, &motg->inputs);
- dev_dbg(phy->dev, "BSV set/clear\n");
- pm_runtime_get_noresume(phy->dev);
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ }
+ work = 1;
+ }
+ if (work)
+ queue_work(motg->otg_wq, &motg->sm_work);
+
+ writel_relaxed(otgsc, USB_OTGSC);
+
+ return IRQ_HANDLED;
+}
+
+static void msm_otg_set_vbus_state(int online)
+{
+ struct msm_otg *motg = the_msm_otg;
+ static bool init;
+
+ motg->vbus_state = online;
+
+ if (motg->err_event_seen)
+ return;
+
+ if (online) {
+ pr_debug("PMIC: BSV set\n");
+ msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV SET",
+ init, motg->inputs);
+ if (test_and_set_bit(B_SESS_VLD, &motg->inputs) && init)
+ return;
+ } else {
+ pr_debug("PMIC: BSV clear\n");
+ msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CLEAR",
+ init, motg->inputs);
+ motg->is_ext_chg_dcp = false;
+ if (!test_and_clear_bit(B_SESS_VLD, &motg->inputs) && init)
+ return;
}
- writel(otgsc, USB_OTGSC);
- schedule_work(&motg->sm_work);
+ /* do not queue state m/c work if id is grounded */
+ if (!test_bit(ID, &motg->inputs) &&
+ !motg->pdata->vbus_low_as_hostmode) {
+ /*
+ * state machine work waits for initial VBUS
+ * completion in UNDEFINED state. Process
+ * the initial VBUS event in ID_GND state.
+ */
+ if (init)
+ return;
+ }
+
+ if (!init) {
+ init = true;
+ if (pmic_vbus_init.done &&
+ test_bit(B_SESS_VLD, &motg->inputs)) {
+ pr_debug("PMIC: BSV came late\n");
+ msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CAME LATE",
+ init, motg->inputs);
+ goto out;
+ }
+
+ if (motg->pdata->vbus_low_as_hostmode &&
+ !test_bit(B_SESS_VLD, &motg->inputs)) {
+ motg->id_state = USB_ID_GROUND;
+ clear_bit(ID, &motg->inputs);
+ }
+ complete(&pmic_vbus_init);
+ pr_debug("PMIC: BSV init complete\n");
+ msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV INIT COMPLETE",
+ init, motg->inputs);
+ return;
+ }
+
+out:
+ if (motg->is_ext_chg_dcp) {
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+ } else {
+ motg->is_ext_chg_dcp = false;
+ motg->chg_state = USB_CHG_STATE_UNDEFINED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ msm_otg_notify_charger(motg, 0);
+ }
+ return;
+ }
+
+ msm_otg_dbg_log_event(&motg->phy, "CHECK VBUS EVENT DURING SUSPEND",
+ atomic_read(&motg->pm_suspended),
+ motg->sm_work_pending);
+
+ /* Move to host mode on vbus low if required */
+ if (motg->pdata->vbus_low_as_hostmode) {
+ if (!test_bit(B_SESS_VLD, &motg->inputs))
+ clear_bit(ID, &motg->inputs);
+ else
+ set_bit(ID, &motg->inputs);
+ }
+ msm_otg_kick_sm_work(motg);
+}
+
+static void msm_id_status_w(struct work_struct *w)
+{
+ struct msm_otg *motg = container_of(w, struct msm_otg,
+ id_status_work.work);
+ int work = 0;
+
+ dev_dbg(motg->phy.dev, "ID status_w\n");
+
+ if (motg->pdata->pmic_id_irq)
+ motg->id_state = msm_otg_read_pmic_id_state(motg);
+ else if (motg->ext_id_irq)
+ motg->id_state = gpio_get_value(motg->pdata->usb_id_gpio);
+ else if (motg->phy_irq)
+ motg->id_state = msm_otg_read_phy_id_state(motg);
+
+ if (motg->err_event_seen)
+ return;
+
+ if (motg->id_state) {
+ if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+ gpio_direction_input(motg->pdata->switch_sel_gpio);
+ if (!test_and_set_bit(ID, &motg->inputs)) {
+ pr_debug("ID set\n");
+ msm_otg_dbg_log_event(&motg->phy, "ID SET",
+ motg->inputs, motg->phy.otg->state);
+ work = 1;
+ }
+ } else {
+ if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+ gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
+ if (test_and_clear_bit(ID, &motg->inputs)) {
+ pr_debug("ID clear\n");
+ msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
+ motg->inputs, motg->phy.otg->state);
+ work = 1;
+ }
+ }
+
+ if (work && (motg->phy.otg->state != OTG_STATE_UNDEFINED)) {
+ msm_otg_dbg_log_event(&motg->phy,
+ "CHECK ID EVENT DURING SUSPEND",
+ atomic_read(&motg->pm_suspended),
+ motg->sm_work_pending);
+ msm_otg_kick_sm_work(motg);
+ }
+}
+
+#define MSM_ID_STATUS_DELAY 5 /* 5msec */
+static irqreturn_t msm_id_irq(int irq, void *data)
+{
+ struct msm_otg *motg = data;
+
+	/* Schedule delayed work for 5 msec for ID line state to settle */
+ queue_delayed_work(motg->otg_wq, &motg->id_status_work,
+ msecs_to_jiffies(MSM_ID_STATUS_DELAY));
+
return IRQ_HANDLED;
}
+int msm_otg_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct msm_otg *motg = container_of(
+ notify_block, struct msm_otg, pm_notify);
+
+ dev_dbg(motg->phy.dev, "OTG PM notify:%lx, sm_pending:%u\n", mode,
+ motg->sm_work_pending);
+ msm_otg_dbg_log_event(&motg->phy, "PM NOTIFY",
+ mode, motg->sm_work_pending);
+
+ switch (mode) {
+ case PM_POST_SUSPEND:
+ /* OTG sm_work can be armed now */
+ atomic_set(&motg->pm_suspended, 0);
+
+ /* Handle any deferred wakeup events from USB during suspend */
+ if (motg->sm_work_pending) {
+ motg->sm_work_pending = false;
+ queue_work(motg->otg_wq, &motg->sm_work);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int msm_otg_mode_show(struct seq_file *s, void *unused)
{
struct msm_otg *motg = s->private;
@@ -1491,7 +3462,9 @@
case OTG_STATE_A_HOST:
seq_puts(s, "host\n");
break;
+ case OTG_STATE_B_IDLE:
case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_SUSPEND:
seq_puts(s, "peripheral\n");
break;
default:
@@ -1513,9 +3486,9 @@
struct seq_file *s = file->private_data;
struct msm_otg *motg = s->private;
char buf[16];
- struct usb_otg *otg = motg->phy.otg;
+ struct usb_phy *phy = &motg->phy;
int status = count;
- enum usb_dr_mode req_mode;
+ enum usb_mode_type req_mode;
memset(buf, 0x00, sizeof(buf));
@@ -1525,21 +3498,22 @@
}
if (!strncmp(buf, "host", 4)) {
- req_mode = USB_DR_MODE_HOST;
+ req_mode = USB_HOST;
} else if (!strncmp(buf, "peripheral", 10)) {
- req_mode = USB_DR_MODE_PERIPHERAL;
+ req_mode = USB_PERIPHERAL;
} else if (!strncmp(buf, "none", 4)) {
- req_mode = USB_DR_MODE_UNKNOWN;
+ req_mode = USB_NONE;
} else {
status = -EINVAL;
goto out;
}
switch (req_mode) {
- case USB_DR_MODE_UNKNOWN:
- switch (otg->state) {
+ case USB_NONE:
+ switch (phy->otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_SUSPEND:
set_bit(ID, &motg->inputs);
clear_bit(B_SESS_VLD, &motg->inputs);
break;
@@ -1547,8 +3521,8 @@
goto out;
}
break;
- case USB_DR_MODE_PERIPHERAL:
- switch (otg->state) {
+ case USB_PERIPHERAL:
+ switch (phy->otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_A_HOST:
set_bit(ID, &motg->inputs);
@@ -1558,10 +3532,11 @@
goto out;
}
break;
- case USB_DR_MODE_HOST:
- switch (otg->state) {
+ case USB_HOST:
+ switch (phy->otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_SUSPEND:
clear_bit(ID, &motg->inputs);
break;
default:
@@ -1572,13 +3547,14 @@
goto out;
}
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
+ motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
+ USB_ID_GROUND;
+ queue_work(motg->otg_wq, &motg->sm_work);
out:
return status;
}
-static const struct file_operations msm_otg_mode_fops = {
+const struct file_operations msm_otg_mode_fops = {
.open = msm_otg_mode_open,
.read = seq_read,
.write = msm_otg_mode_write,
@@ -1586,66 +3562,692 @@
.release = single_release,
};
+static int msm_otg_show_otg_state(struct seq_file *s, void *unused)
+{
+ struct msm_otg *motg = s->private;
+ struct usb_phy *phy = &motg->phy;
+
+ seq_printf(s, "%s\n", usb_otg_state_string(phy->otg->state));
+ return 0;
+}
+
+static int msm_otg_otg_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_otg_show_otg_state, inode->i_private);
+}
+
+const struct file_operations msm_otg_state_fops = {
+ .open = msm_otg_otg_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int msm_otg_show_chg_type(struct seq_file *s, void *unused)
+{
+ struct msm_otg *motg = s->private;
+
+ seq_printf(s, "%s\n", chg_to_string(motg->chg_type));
+ return 0;
+}
+
+static int msm_otg_chg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_otg_show_chg_type, inode->i_private);
+}
+
+const struct file_operations msm_otg_chg_fops = {
+ .open = msm_otg_chg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int msm_otg_bus_show(struct seq_file *s, void *unused)
+{
+ if (debug_bus_voting_enabled)
+ seq_puts(s, "enabled\n");
+ else
+ seq_puts(s, "disabled\n");
+
+ return 0;
+}
+
+static int msm_otg_bus_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_otg_bus_show, inode->i_private);
+}
+
+static ssize_t msm_otg_bus_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[8];
+ struct seq_file *s = file->private_data;
+ struct msm_otg *motg = s->private;
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "enable", 6)) {
+ /* Do not vote here. Let OTG statemachine decide when to vote */
+ debug_bus_voting_enabled = true;
+ } else {
+ debug_bus_voting_enabled = false;
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+ }
+
+ return count;
+}
+
/*
 * debugfs "dbg_buff": dump the driver's in-memory debug-log ring.
 *
 * motg->buf[] holds DEBUG_MSG_LEN-sized message slots; motg->dbg_idx
 * presumably marks the next write slot, i.e. the oldest entry once
 * the ring has wrapped (TODO confirm against the dbg_log writer).
 * Entries are printed in ring order starting at dbg_idx; empty slots
 * are skipped.  dbg_lock is held for reading so a concurrent writer
 * cannot tear a message mid-print.
 */
static int msm_otg_dbg_buff_show(struct seq_file *s, void *unused)
{
	struct msm_otg *motg = s->private;
	unsigned long flags;
	unsigned int i;

	read_lock_irqsave(&motg->dbg_lock, flags);

	i = motg->dbg_idx;
	/* Print the slot at dbg_idx first (oldest entry if wrapped) */
	if (strnlen(motg->buf[i], DEBUG_MSG_LEN))
		seq_printf(s, "%s\n", motg->buf[i]);
	/* Walk the rest of the ring back around to dbg_idx */
	for (dbg_inc(&i); i != motg->dbg_idx; dbg_inc(&i)) {
		if (!strnlen(motg->buf[i], DEBUG_MSG_LEN))
			continue;
		seq_printf(s, "%s\n", motg->buf[i]);
	}
	read_unlock_irqrestore(&motg->dbg_lock, flags);

	return 0;
}

/* debugfs open hook: bind the show routine to this msm_otg instance. */
static int msm_otg_dbg_buff_open(struct inode *inode, struct file *file)
{
	return single_open(file, msm_otg_dbg_buff_show, inode->i_private);
}

/* Read-only seq_file operations for the "dbg_buff" debugfs entry. */
const struct file_operations msm_otg_dbg_buff_fops = {
	.open = msm_otg_dbg_buff_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
+
+static int msm_otg_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+ struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+ if (!motg->rm_pulldown) {
+ ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_ON);
+ if (!ret) {
+ motg->rm_pulldown = true;
+ msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
+ motg->rm_pulldown, 0);
+ }
+ }
+
+ return ret;
+}
+
+static int msm_otg_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+ struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+ if (motg->rm_pulldown) {
+ ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_OFF);
+ if (!ret) {
+ motg->rm_pulldown = false;
+ msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
+ motg->rm_pulldown, 0);
+ }
+ }
+
+ return ret;
+}
+
+static int msm_otg_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct msm_otg *motg = rdev_get_drvdata(rdev);
+
+ return motg->rm_pulldown;
+}
+
+static struct regulator_ops msm_otg_dpdm_regulator_ops = {
+ .enable = msm_otg_dpdm_regulator_enable,
+ .disable = msm_otg_dpdm_regulator_disable,
+ .is_enabled = msm_otg_dpdm_regulator_is_enabled,
+};
+
+static int usb_phy_regulator_init(struct msm_otg *motg)
+{
+ struct device *dev = motg->phy.dev;
+ struct regulator_config cfg = {};
+ struct regulator_init_data *init_data;
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (!init_data)
+ return -ENOMEM;
+
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+ motg->dpdm_rdesc.owner = THIS_MODULE;
+ motg->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+ motg->dpdm_rdesc.ops = &msm_otg_dpdm_regulator_ops;
+ motg->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = motg;
+ cfg.of_node = dev->of_node;
+
+ motg->dpdm_rdev = devm_regulator_register(dev, &motg->dpdm_rdesc, &cfg);
+ if (IS_ERR(motg->dpdm_rdev))
+ return PTR_ERR(motg->dpdm_rdev);
+
+ return 0;
+}
+
/* Read/write seq_file operations for the "bus_voting" debugfs entry. */
const struct file_operations msm_otg_bus_fops = {
	.open = msm_otg_bus_open,
	.read = seq_read,
	.write = msm_otg_bus_write,
	.llseek = seq_lseek,
	.release = single_release,
};
+
static struct dentry *msm_otg_dbg_root;
-static struct dentry *msm_otg_dbg_mode;
static int msm_otg_debugfs_init(struct msm_otg *motg)
{
+ struct dentry *msm_otg_dentry;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
return -ENODEV;
- msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
- msm_otg_dbg_root, motg, &msm_otg_mode_fops);
- if (!msm_otg_dbg_mode) {
- debugfs_remove(msm_otg_dbg_root);
- msm_otg_dbg_root = NULL;
+ if ((pdata->mode == USB_OTG || pdata->mode == USB_PERIPHERAL) &&
+ pdata->otg_control == OTG_USER_CONTROL) {
+
+ msm_otg_dentry = debugfs_create_file("mode", 0644,
+ msm_otg_dbg_root, motg, &msm_otg_mode_fops);
+
+ if (!msm_otg_dentry) {
+ debugfs_remove(msm_otg_dbg_root);
+ msm_otg_dbg_root = NULL;
+ return -ENODEV;
+ }
+ }
+
+ msm_otg_dentry = debugfs_create_file("chg_type", 0444, msm_otg_dbg_root,
+ motg, &msm_otg_chg_fops);
+
+ if (!msm_otg_dentry) {
+ debugfs_remove_recursive(msm_otg_dbg_root);
return -ENODEV;
}
+ msm_otg_dentry = debugfs_create_file("bus_voting", 0644,
+ msm_otg_dbg_root, motg, &msm_otg_bus_fops);
+
+ if (!msm_otg_dentry) {
+ debugfs_remove_recursive(msm_otg_dbg_root);
+ return -ENODEV;
+ }
+
+ msm_otg_dentry = debugfs_create_file("otg_state", 0444,
+ msm_otg_dbg_root, motg, &msm_otg_state_fops);
+
+ if (!msm_otg_dentry) {
+ debugfs_remove_recursive(msm_otg_dbg_root);
+ return -ENODEV;
+ }
+
+ msm_otg_dentry = debugfs_create_file("dbg_buff", 0444,
+ msm_otg_dbg_root, motg, &msm_otg_dbg_buff_fops);
+
+ if (!msm_otg_dentry) {
+ debugfs_remove_recursive(msm_otg_dbg_root);
+ return -ENODEV;
+ }
return 0;
}
static void msm_otg_debugfs_cleanup(void)
{
- debugfs_remove(msm_otg_dbg_mode);
- debugfs_remove(msm_otg_dbg_root);
+ debugfs_remove_recursive(msm_otg_dbg_root);
}
-static const struct of_device_id msm_otg_dt_match[] = {
- {
- .compatible = "qcom,usb-otg-ci",
- .data = (void *) CI_45NM_INTEGRATED_PHY
- },
- {
- .compatible = "qcom,usb-otg-snps",
- .data = (void *) SNPS_28NM_INTEGRATED_PHY
- },
- { }
+static ssize_t
+set_msm_otg_perf_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct msm_otg *motg = the_msm_otg;
+ int ret;
+ long clk_rate;
+
+ pr_debug("%s: enable:%d\n", __func__, !strncasecmp(buf, "enable", 6));
+
+ if (!strncasecmp(buf, "enable", 6)) {
+ clk_rate = motg->core_clk_nominal_rate;
+ msm_otg_bus_freq_set(motg, USB_NOC_NOM_VOTE);
+ } else {
+ clk_rate = motg->core_clk_svs_rate;
+ msm_otg_bus_freq_set(motg, USB_NOC_SVS_VOTE);
+ }
+
+ if (clk_rate) {
+ pr_debug("Set usb sys_clk rate:%ld\n", clk_rate);
+ ret = clk_set_rate(motg->core_clk, clk_rate);
+ if (ret)
+ pr_err("sys_clk set_rate fail:%d %ld\n", ret, clk_rate);
+ msm_otg_dbg_log_event(&motg->phy, "OTG PERF SET",
+ clk_rate, ret);
+ } else {
+ pr_err("usb sys_clk rate is undefined\n");
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(perf_mode, 0200, NULL, set_msm_otg_perf_mode);
+
/* SCM call parameters for the PNOC errata workaround below. */
#define MSM_OTG_CMD_ID 0x09
#define MSM_OTG_DEVICE_ID 0x04
#define MSM_OTG_VMID_IDX 0xFF
#define MSM_OTG_MEM_TYPE 0x02
/* Packed request buffer passed to secure world via scm_call(). */
struct msm_otg_scm_cmd_buf {
	unsigned int device_id;
	unsigned int vmid_idx;
	unsigned int mem_type;
} __attribute__ ((__packed__));

/*
 * msm_otg_pnoc_errata_fix - apply a PNOC hardware errata workaround.
 *
 * When the platform data flags the errata (pnoc_errata_fix), issue an
 * SCM call into the secure world to update the VMIDMT configuration
 * for this device.  Best-effort: a failure is only logged.
 */
static void msm_otg_pnoc_errata_fix(struct msm_otg *motg)
{
	int ret;
	struct msm_otg_platform_data *pdata = motg->pdata;
	struct msm_otg_scm_cmd_buf cmd_buf;

	if (!pdata->pnoc_errata_fix)
		return;

	dev_dbg(motg->phy.dev, "applying fix for pnoc h/w issue\n");

	cmd_buf.device_id = MSM_OTG_DEVICE_ID;
	cmd_buf.vmid_idx = MSM_OTG_VMID_IDX;
	cmd_buf.mem_type = MSM_OTG_MEM_TYPE;

	ret = scm_call(SCM_SVC_MP, MSM_OTG_CMD_ID, &cmd_buf,
				sizeof(cmd_buf), NULL, 0);

	if (ret)
		dev_err(motg->phy.dev, "scm command failed to update VMIDMT\n");
}
+
+static u64 msm_otg_dma_mask = DMA_BIT_MASK(32);
+static struct platform_device *msm_otg_add_pdev(
+ struct platform_device *ofdev, const char *name)
+{
+ struct platform_device *pdev;
+ const struct resource *res = ofdev->resource;
+ unsigned int num = ofdev->num_resources;
+ int retval;
+ struct ci13xxx_platform_data ci_pdata;
+ struct msm_otg_platform_data *otg_pdata;
+ struct msm_otg *motg;
+
+ pdev = platform_device_alloc(name, -1);
+ if (!pdev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &msm_otg_dma_mask;
+ pdev->dev.parent = &ofdev->dev;
+
+ if (num) {
+ retval = platform_device_add_resources(pdev, res, num);
+ if (retval)
+ goto error;
+ }
+
+ if (!strcmp(name, "msm_hsusb")) {
+ otg_pdata =
+ (struct msm_otg_platform_data *)
+ ofdev->dev.platform_data;
+ motg = platform_get_drvdata(ofdev);
+ ci_pdata.log2_itc = otg_pdata->log2_itc;
+ ci_pdata.usb_core_id = 0;
+ ci_pdata.l1_supported = otg_pdata->l1_supported;
+ ci_pdata.enable_ahb2ahb_bypass =
+ otg_pdata->enable_ahb2ahb_bypass;
+ ci_pdata.enable_streaming = otg_pdata->enable_streaming;
+ ci_pdata.enable_axi_prefetch = otg_pdata->enable_axi_prefetch;
+ retval = platform_device_add_data(pdev, &ci_pdata,
+ sizeof(ci_pdata));
+ if (retval)
+ goto error;
+ }
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto error;
+
+ return pdev;
+
+error:
+ platform_device_put(pdev);
+ return ERR_PTR(retval);
+}
+
+static int msm_otg_setup_devices(struct platform_device *ofdev,
+ enum usb_mode_type mode, bool init)
+{
+ const char *gadget_name = "msm_hsusb";
+ const char *host_name = "msm_hsusb_host";
+ static struct platform_device *gadget_pdev;
+ static struct platform_device *host_pdev;
+ int retval = 0;
+
+ if (!init) {
+ if (gadget_pdev) {
+ platform_device_unregister(gadget_pdev);
+ device_remove_file(&gadget_pdev->dev,
+ &dev_attr_perf_mode);
+ }
+ if (host_pdev)
+ platform_device_unregister(host_pdev);
+ return 0;
+ }
+
+ switch (mode) {
+ case USB_OTG:
+ /* fall through */
+ case USB_PERIPHERAL:
+ gadget_pdev = msm_otg_add_pdev(ofdev, gadget_name);
+ if (IS_ERR(gadget_pdev)) {
+ retval = PTR_ERR(gadget_pdev);
+ break;
+ }
+ if (device_create_file(&gadget_pdev->dev, &dev_attr_perf_mode))
+ dev_err(&gadget_pdev->dev, "perf_mode file failed\n");
+ if (mode == USB_PERIPHERAL)
+ break;
+ /* fall through */
+ case USB_HOST:
+ host_pdev = msm_otg_add_pdev(ofdev, host_name);
+ if (IS_ERR(host_pdev)) {
+ retval = PTR_ERR(host_pdev);
+ if (mode == USB_OTG) {
+ platform_device_unregister(gadget_pdev);
+ device_remove_file(&gadget_pdev->dev,
+ &dev_attr_perf_mode);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return retval;
+}
+
/*
 * Character-device open for the external-charger interface.
 * Records that the interface is in use and stashes the driver
 * context for the subsequent ioctl/mmap/release calls.
 */
static int msm_otg_ext_chg_open(struct inode *inode, struct file *file)
{
	struct msm_otg *motg = the_msm_otg;

	pr_debug("msm_otg ext chg open\n");
	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: OPEN",
			motg->inputs, motg->phy.otg->state);

	motg->ext_chg_opened = true;
	file->private_data = (void *)motg;
	return 0;
}
+
+static long
+msm_otg_ext_chg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct msm_otg *motg = file->private_data;
+ struct msm_usb_chg_info info = {0};
+ int ret = 0, val;
+
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: IOCTL", cmd, 0);
+ switch (cmd) {
+ case MSM_USB_EXT_CHG_INFO:
+ info.chg_block_type = USB_CHG_BLOCK_ULPI;
+ info.page_offset = motg->io_res->start & ~PAGE_MASK;
+ /* mmap() works on PAGE granularity */
+ info.length = PAGE_SIZE;
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info))) {
+ pr_err("%s: copy to user failed\n\n", __func__);
+ ret = -EFAULT;
+ }
+ break;
+ case MSM_USB_EXT_CHG_BLOCK_LPM:
+ if (get_user(val, (int __user *)arg)) {
+ pr_err("%s: get_user failed\n\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+ pr_debug("%s: LPM block request %d\n", __func__, val);
+ msm_otg_dbg_log_event(&motg->phy, "LPM BLOCK REQ", val, 0);
+ if (val) { /* block LPM */
+ if (motg->chg_type == USB_DCP_CHARGER) {
+ motg->ext_chg_active = ACTIVE;
+ msm_otg_dbg_log_event(&motg->phy,
+ "PM RUNTIME: EXT_CHG GET",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_get_sync(motg->phy.dev);
+ } else {
+ motg->ext_chg_active = INACTIVE;
+ complete(&motg->ext_chg_wait);
+ ret = -ENODEV;
+ }
+ } else {
+ motg->ext_chg_active = INACTIVE;
+ complete(&motg->ext_chg_wait);
+ /*
+ * If usb cable is disconnected and then userspace
+ * calls ioctl to unblock low power mode, make sure
+ * otg_sm work for usb disconnect is processed first
+ * followed by decrementing the PM usage counters.
+ */
+ flush_work(&motg->sm_work);
+ msm_otg_dbg_log_event(&motg->phy,
+ "PM RUNTIME: EXT_CHG PUT",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_put_sync(motg->phy.dev);
+ }
+ break;
+ case MSM_USB_EXT_CHG_VOLTAGE_INFO:
+ if (get_user(val, (int __user *)arg)) {
+ pr_err("%s: get_user failed\n\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+ if (val == USB_REQUEST_5V)
+ pr_debug("%s:voting 5V voltage request\n", __func__);
+ else if (val == USB_REQUEST_9V)
+ pr_debug("%s:voting 9V voltage request\n", __func__);
+ break;
+ case MSM_USB_EXT_CHG_RESULT:
+ if (get_user(val, (int __user *)arg)) {
+ pr_err("%s: get_user failed\n\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+ if (!val)
+ pr_debug("%s:voltage request successful\n", __func__);
+ else
+ pr_debug("%s:voltage request failed\n", __func__);
+ break;
+ case MSM_USB_EXT_CHG_TYPE:
+ if (get_user(val, (int __user *)arg)) {
+ pr_err("%s: get_user failed\n\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+ msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
+
+ if (val)
+ pr_debug("%s:charger is external charger\n", __func__);
+ else
+ pr_debug("%s:charger is not ext charger\n", __func__);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int msm_otg_ext_chg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct msm_otg *motg = file->private_data;
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ int ret;
+
+ if (vma->vm_pgoff || vsize > PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_pgoff = __phys_to_pfn(motg->io_res->start);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vsize, vma->vm_page_prot);
+ if (ret < 0) {
+ pr_err("%s: failed with return val %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/* Character-device release: mark the ext-charger interface closed. */
static int msm_otg_ext_chg_release(struct inode *inode, struct file *file)
{
	struct msm_otg *motg = file->private_data;

	pr_debug("msm_otg ext chg release\n");
	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: RELEASE",
			motg->inputs, motg->phy.otg->state);

	motg->ext_chg_opened = false;

	return 0;
}

/* File operations for the usb_ext_chg character device. */
static const struct file_operations msm_otg_ext_chg_fops = {
	.owner = THIS_MODULE,
	.open = msm_otg_ext_chg_open,
	.unlocked_ioctl = msm_otg_ext_chg_ioctl,
	.mmap = msm_otg_ext_chg_mmap,
	.release = msm_otg_ext_chg_release,
};
-MODULE_DEVICE_TABLE(of, msm_otg_dt_match);
+
+static int msm_otg_setup_ext_chg_cdev(struct msm_otg *motg)
+{
+ int ret;
+
+ if (motg->pdata->enable_sec_phy || motg->pdata->mode == USB_HOST ||
+ motg->pdata->otg_control != OTG_PMIC_CONTROL) {
+ pr_debug("usb ext chg is not supported by msm otg\n");
+ return -ENODEV;
+ }
+
+ ret = alloc_chrdev_region(&motg->ext_chg_dev, 0, 1, "usb_ext_chg");
+ if (ret < 0) {
+ pr_err("Fail to allocate usb ext char dev region\n");
+ return ret;
+ }
+ motg->ext_chg_class = class_create(THIS_MODULE, "msm_ext_chg");
+ if (ret < 0) {
+ pr_err("Fail to create usb ext chg class\n");
+ goto unreg_chrdev;
+ }
+ cdev_init(&motg->ext_chg_cdev, &msm_otg_ext_chg_fops);
+ motg->ext_chg_cdev.owner = THIS_MODULE;
+
+ ret = cdev_add(&motg->ext_chg_cdev, motg->ext_chg_dev, 1);
+ if (ret < 0) {
+ pr_err("Fail to add usb ext chg cdev\n");
+ goto destroy_class;
+ }
+ motg->ext_chg_device = device_create(motg->ext_chg_class,
+ NULL, motg->ext_chg_dev, NULL,
+ "usb_ext_chg");
+ if (IS_ERR(motg->ext_chg_device)) {
+ pr_err("Fail to create usb ext chg device\n");
+ ret = PTR_ERR(motg->ext_chg_device);
+ motg->ext_chg_device = NULL;
+ goto del_cdev;
+ }
+
+ init_completion(&motg->ext_chg_wait);
+ pr_debug("msm otg ext chg cdev setup success\n");
+ return 0;
+
+del_cdev:
+ cdev_del(&motg->ext_chg_cdev);
+destroy_class:
+ class_destroy(motg->ext_chg_class);
+unreg_chrdev:
+ unregister_chrdev_region(motg->ext_chg_dev, 1);
+
+ return ret;
+}
+
+static ssize_t dpdm_pulldown_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct msm_otg *motg = the_msm_otg;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", pdata->dpdm_pulldown_added ?
+ "enabled" : "disabled");
+}
+
+static ssize_t dpdm_pulldown_enable_store(struct device *dev,
+ struct device_attribute *attr, const char
+ *buf, size_t size)
+{
+ struct msm_otg *motg = the_msm_otg;
+ struct msm_otg_platform_data *pdata = motg->pdata;
+
+ if (!strncasecmp(buf, "enable", 6)) {
+ pdata->dpdm_pulldown_added = true;
+ return size;
+ } else if (!strncasecmp(buf, "disable", 7)) {
+ pdata->dpdm_pulldown_added = false;
+ return size;
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(dpdm_pulldown_enable, 0644,
+ dpdm_pulldown_enable_show, dpdm_pulldown_enable_store);
static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
- struct msm_usb_cable *vbus = container_of(nb, struct msm_usb_cable, nb);
- struct msm_otg *motg = container_of(vbus, struct msm_otg, vbus);
+ struct msm_otg *motg = container_of(nb, struct msm_otg, vbus_nb);
if (event)
set_bit(B_SESS_VLD, &motg->inputs);
else
clear_bit(B_SESS_VLD, &motg->inputs);
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
-
- schedule_work(&motg->sm_work);
+ queue_work(motg->otg_wq, &motg->sm_work);
return NOTIFY_DONE;
}
@@ -1653,362 +4255,974 @@
static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
- struct msm_usb_cable *id = container_of(nb, struct msm_usb_cable, nb);
- struct msm_otg *motg = container_of(id, struct msm_otg, id);
+ struct msm_otg *motg = container_of(nb, struct msm_otg, id_nb);
if (event)
clear_bit(ID, &motg->inputs);
else
set_bit(ID, &motg->inputs);
- schedule_work(&motg->sm_work);
+ queue_work(motg->otg_wq, &motg->sm_work);
return NOTIFY_DONE;
}
-static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
/*
 * msm_otg_extcon_register - hook up extcon VBUS and ID notifications.
 *
 * Reads the "extcon" phandle list from this device's DT node:
 * phandle 0 supplies EXTCON_USB (VBUS), phandle 1 (if present)
 * supplies EXTCON_USB_HOST (ID).  A missing phandle (-ENODEV) is not
 * an error.  NOTE(review): when only one phandle is given, the same
 * edev is registered for both USB and USB-HOST — presumably a single
 * extcon providing both cable states; confirm this is intentional.
 *
 * Returns 0 on success or a negative errno; on an ID-side failure the
 * already-registered VBUS notifier is unregistered again.
 */
static int msm_otg_extcon_register(struct msm_otg *motg)
{
	struct device_node *node = motg->pdev->dev.of_node;
	struct extcon_dev *edev;
	int ret = 0;

	/* No "extcon" property: nothing to register */
	if (!of_property_read_bool(node, "extcon"))
		return 0;

	edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		motg->extcon_vbus = edev;
		motg->vbus_nb.notifier_call = msm_otg_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
							&motg->vbus_nb);
		if (ret < 0) {
			dev_err(&motg->pdev->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* A second phandle, when present, overrides edev for the ID side */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	if (!IS_ERR(edev)) {
		motg->extcon_id = edev;
		motg->id_nb.notifier_call = msm_otg_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
							&motg->id_nb);
		if (ret < 0) {
			dev_err(&motg->pdev->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	/* Undo the VBUS registration done above, if any */
	if (motg->extcon_vbus)
		extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
						&motg->vbus_nb);

	return ret;
}
+
+struct msm_otg_platform_data *msm_otg_dt_to_pdata(struct platform_device *pdev)
+{
struct device_node *node = pdev->dev.of_node;
- struct property *prop;
- int len, ret, words;
- u32 val, tmp[3];
+ struct msm_otg_platform_data *pdata;
+ int len = 0;
+ int res_gpio;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return -ENOMEM;
+ return NULL;
- motg->pdata = pdata;
-
- pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
- if (!pdata->phy_type)
- return 1;
-
- motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
- if (IS_ERR(motg->link_rst))
- return PTR_ERR(motg->link_rst);
-
- motg->phy_rst = devm_reset_control_get(&pdev->dev, "phy");
- if (IS_ERR(motg->phy_rst))
- motg->phy_rst = NULL;
-
- pdata->mode = usb_get_dr_mode(&pdev->dev);
- if (pdata->mode == USB_DR_MODE_UNKNOWN)
- pdata->mode = USB_DR_MODE_OTG;
-
- pdata->otg_control = OTG_PHY_CONTROL;
- if (!of_property_read_u32(node, "qcom,otg-control", &val))
- if (val == OTG_PMIC_CONTROL)
- pdata->otg_control = val;
-
- if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2)
- motg->phy_number = val;
-
- motg->vdd_levels[VDD_LEVEL_NONE] = USB_PHY_SUSP_DIG_VOL;
- motg->vdd_levels[VDD_LEVEL_MIN] = USB_PHY_VDD_DIG_VOL_MIN;
- motg->vdd_levels[VDD_LEVEL_MAX] = USB_PHY_VDD_DIG_VOL_MAX;
-
- if (of_get_property(node, "qcom,vdd-levels", &len) &&
- len == sizeof(tmp)) {
- of_property_read_u32_array(node, "qcom,vdd-levels",
- tmp, len / sizeof(*tmp));
- motg->vdd_levels[VDD_LEVEL_NONE] = tmp[VDD_LEVEL_NONE];
- motg->vdd_levels[VDD_LEVEL_MIN] = tmp[VDD_LEVEL_MIN];
- motg->vdd_levels[VDD_LEVEL_MAX] = tmp[VDD_LEVEL_MAX];
+ of_get_property(node, "qcom,hsusb-otg-phy-init-seq", &len);
+ if (len) {
+ pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!pdata->phy_init_seq)
+ return NULL;
+ of_property_read_u32_array(node, "qcom,hsusb-otg-phy-init-seq",
+ pdata->phy_init_seq,
+ len/sizeof(*pdata->phy_init_seq));
}
+ of_property_read_u32(node, "qcom,hsusb-otg-power-budget",
+ &pdata->power_budget);
+ of_property_read_u32(node, "qcom,hsusb-otg-mode",
+ &pdata->mode);
+ of_property_read_u32(node, "qcom,hsusb-otg-otg-control",
+ &pdata->otg_control);
+ of_property_read_u32(node, "qcom,hsusb-otg-default-mode",
+ &pdata->default_mode);
+ of_property_read_u32(node, "qcom,hsusb-otg-phy-type",
+ &pdata->phy_type);
+ pdata->disable_reset_on_disconnect = of_property_read_bool(node,
+ "qcom,hsusb-otg-disable-reset");
+ pdata->pnoc_errata_fix = of_property_read_bool(node,
+ "qcom,hsusb-otg-pnoc-errata-fix");
+ pdata->enable_lpm_on_dev_suspend = of_property_read_bool(node,
+ "qcom,hsusb-otg-lpm-on-dev-suspend");
+ pdata->core_clk_always_on_workaround = of_property_read_bool(node,
+ "qcom,hsusb-otg-clk-always-on-workaround");
+ pdata->delay_lpm_on_disconnect = of_property_read_bool(node,
+ "qcom,hsusb-otg-delay-lpm");
+ pdata->dp_manual_pullup = of_property_read_bool(node,
+ "qcom,dp-manual-pullup");
+ pdata->enable_sec_phy = of_property_read_bool(node,
+ "qcom,usb2-enable-hsphy2");
+ of_property_read_u32(node, "qcom,hsusb-log2-itc",
+ &pdata->log2_itc);
- motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
+ of_property_read_u32(node, "qcom,hsusb-otg-mpm-dpsehv-int",
+ &pdata->mpm_dpshv_int);
+ of_property_read_u32(node, "qcom,hsusb-otg-mpm-dmsehv-int",
+ &pdata->mpm_dmshv_int);
+ pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+ if (pdata->pmic_id_irq < 0)
+ pdata->pmic_id_irq = 0;
- motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(motg->switch_gpio))
- return PTR_ERR(motg->switch_gpio);
+ pdata->hub_reset_gpio = of_get_named_gpio(
+ node, "qcom,hub-reset-gpio", 0);
+ if (pdata->hub_reset_gpio < 0)
+ pr_debug("hub_reset_gpio is not available\n");
- ext_id = ERR_PTR(-ENODEV);
- ext_vbus = ERR_PTR(-ENODEV);
- if (of_property_read_bool(node, "extcon")) {
+ pdata->usbeth_reset_gpio = of_get_named_gpio(
+ node, "qcom,usbeth-reset-gpio", 0);
+ if (pdata->usbeth_reset_gpio < 0)
+ pr_debug("usbeth_reset_gpio is not available\n");
- /* Each one of them is not mandatory */
- ext_vbus = extcon_get_edev_by_phandle(&pdev->dev, 0);
- if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
- return PTR_ERR(ext_vbus);
+ pdata->switch_sel_gpio =
+ of_get_named_gpio(node, "qcom,sw-sel-gpio", 0);
+ if (pdata->switch_sel_gpio < 0)
+ pr_debug("switch_sel_gpio is not available\n");
- ext_id = extcon_get_edev_by_phandle(&pdev->dev, 1);
- if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
- return PTR_ERR(ext_id);
- }
+ pdata->usb_id_gpio =
+ of_get_named_gpio(node, "qcom,usbid-gpio", 0);
+ if (pdata->usb_id_gpio < 0)
+ pr_debug("usb_id_gpio is not available\n");
- if (!IS_ERR(ext_vbus)) {
- motg->vbus.extcon = ext_vbus;
- motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
- ret = extcon_register_notifier(ext_vbus, EXTCON_USB,
- &motg->vbus.nb);
- if (ret < 0) {
- dev_err(&pdev->dev, "register VBUS notifier failed\n");
- return ret;
- }
+ pdata->l1_supported = of_property_read_bool(node,
+ "qcom,hsusb-l1-supported");
+ pdata->enable_ahb2ahb_bypass = of_property_read_bool(node,
+ "qcom,ahb-async-bridge-bypass");
+ pdata->disable_retention_with_vdd_min = of_property_read_bool(node,
+ "qcom,disable-retention-with-vdd-min");
+ pdata->enable_phy_id_pullup = of_property_read_bool(node,
+ "qcom,enable-phy-id-pullup");
+ pdata->phy_dvdd_always_on = of_property_read_bool(node,
+ "qcom,phy-dvdd-always-on");
- ret = extcon_get_cable_state_(ext_vbus, EXTCON_USB);
- if (ret)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- }
+ res_gpio = of_get_named_gpio(node, "qcom,hsusb-otg-vddmin-gpio", 0);
+ if (res_gpio < 0)
+ res_gpio = 0;
+ pdata->vddmin_gpio = res_gpio;
- if (!IS_ERR(ext_id)) {
- motg->id.extcon = ext_id;
- motg->id.nb.notifier_call = msm_otg_id_notifier;
- ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
- &motg->id.nb);
- if (ret < 0) {
- dev_err(&pdev->dev, "register ID notifier failed\n");
- extcon_unregister_notifier(motg->vbus.extcon,
- EXTCON_USB, &motg->vbus.nb);
- return ret;
- }
+ pdata->emulation = of_property_read_bool(node,
+ "qcom,emulation");
- ret = extcon_get_cable_state_(ext_id, EXTCON_USB_HOST);
- if (ret)
- clear_bit(ID, &motg->inputs);
- else
- set_bit(ID, &motg->inputs);
- }
+ pdata->enable_streaming = of_property_read_bool(node,
+ "qcom,boost-sysclk-with-streaming");
- prop = of_find_property(node, "qcom,phy-init-sequence", &len);
- if (!prop || !len)
- return 0;
+ pdata->enable_axi_prefetch = of_property_read_bool(node,
+ "qcom,axi-prefetch-enable");
- words = len / sizeof(u32);
-
- if (words >= ULPI_EXT_VENDOR_SPECIFIC) {
- dev_warn(&pdev->dev, "Too big PHY init sequence %d\n", words);
- return 0;
- }
-
- pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
- if (!pdata->phy_init_seq)
- return 0;
-
- ret = of_property_read_u32_array(node, "qcom,phy-init-sequence",
- pdata->phy_init_seq, words);
- if (!ret)
- pdata->phy_init_sz = words;
-
- return 0;
-}
-
-static int msm_otg_reboot_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
-
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- return NOTIFY_DONE;
+ pdata->enable_sdp_typec_current_limit = of_property_read_bool(node,
+ "qcom,enable-sdp-typec-current-limit");
+ pdata->vbus_low_as_hostmode = of_property_read_bool(node,
+ "qcom,vbus-low-as-hostmode");
+ return pdata;
}
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
int ret = 0;
- struct device_node *np = pdev->dev.of_node;
- struct msm_otg_platform_data *pdata;
+ int len = 0;
+ u32 tmp[3];
struct resource *res;
struct msm_otg *motg;
struct usb_phy *phy;
- void __iomem *phy_select;
+ struct msm_otg_platform_data *pdata;
+ void __iomem *tcsr;
+ int id_irq = 0;
- motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL);
- if (!motg)
- return -ENOMEM;
+ dev_info(&pdev->dev, "msm_otg probe\n");
- motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
- GFP_KERNEL);
- if (!motg->phy.otg)
- return -ENOMEM;
-
- phy = &motg->phy;
- phy->dev = &pdev->dev;
-
- motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
- if (IS_ERR(motg->clk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
- return PTR_ERR(motg->clk);
+ motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
+ if (!motg) {
+ ret = -ENOMEM;
+ return ret;
}
/*
- * If USB Core is running its protocol engine based on CORE CLK,
+ * USB Core is running its protocol engine based on CORE CLK,
* CORE CLK must be running at >55Mhz for correct HSUSB
* operation and USB core cannot tolerate frequency changes on
- * CORE CLK.
+ * CORE CLK. For such USB cores, vote for maximum clk frequency
+ * on pclk source
*/
- motg->pclk = devm_clk_get(&pdev->dev, np ? "iface" : "usb_hs_pclk");
- if (IS_ERR(motg->pclk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
- return PTR_ERR(motg->pclk);
+ motg->core_clk = clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(motg->core_clk)) {
+ ret = PTR_ERR(motg->core_clk);
+ motg->core_clk = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get core_clk\n");
+ goto free_motg;
+ }
+
+ motg->core_reset = devm_reset_control_get(&pdev->dev, "core_reset");
+ if (IS_ERR(motg->core_reset)) {
+ dev_err(&pdev->dev, "failed to get core_reset\n");
+ ret = PTR_ERR(motg->core_reset);
+ goto put_core_clk;
}
/*
- * USB core clock is not present on all MSM chips. This
- * clock is introduced to remove the dependency on AXI
- * bus frequency.
+ * USB Core CLK can run at max freq if streaming is enabled. Hence,
+ * get Max supported clk frequency for USB Core CLK and request to set
+ * the same. Otherwise set USB Core CLK to defined default value.
*/
- motg->core_clk = devm_clk_get(&pdev->dev,
- np ? "alt_core" : "usb_hs_core_clk");
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-nominal-sysclk-rate", &ret)) {
+ ret = -EINVAL;
+ goto put_core_clk;
+ } else {
+ motg->core_clk_nominal_rate = clk_round_rate(motg->core_clk,
+ ret);
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- motg->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!motg->regs)
- return -ENOMEM;
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-svs-sysclk-rate", &ret)) {
+ dev_dbg(&pdev->dev, "core_clk svs freq not specified\n");
+ } else {
+ motg->core_clk_svs_rate = clk_round_rate(motg->core_clk, ret);
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
+ motg->default_noc_mode = USB_NOC_NOM_VOTE;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,default-mode-svs")) {
+ motg->core_clk_rate = motg->core_clk_svs_rate;
+ motg->default_noc_mode = USB_NOC_SVS_VOTE;
+ } else if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,boost-sysclk-with-streaming")) {
+ motg->core_clk_rate = motg->core_clk_nominal_rate;
+ } else {
+ motg->core_clk_rate = clk_round_rate(motg->core_clk,
+ USB_DEFAULT_SYSTEM_CLOCK);
+ }
+
+ if (IS_ERR_VALUE(motg->core_clk_rate)) {
+ dev_err(&pdev->dev, "fail to get core clk max freq.\n");
+ } else {
+ ret = clk_set_rate(motg->core_clk, motg->core_clk_rate);
if (ret)
- return ret;
+ dev_err(&pdev->dev, "fail to set core_clk freq:%d\n",
+ ret);
+ }
+
+ motg->pclk = clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(motg->pclk)) {
+ ret = PTR_ERR(motg->pclk);
+ motg->pclk = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get iface_clk\n");
+ goto put_core_clk;
+ }
+
+ motg->xo_clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(motg->xo_clk)) {
+ ret = PTR_ERR(motg->xo_clk);
+ motg->xo_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto put_pclk;
}
/*
- * NOTE: The PHYs can be multiplexed between the chipidea controller
- * and the dwc3 controller, using a single bit. It is important that
- * the dwc3 driver does not set this bit in an incompatible way.
+	 * On a few platforms the USB PHY is fed with the sleep clk.
+ * Hence don't fail probe.
*/
- if (motg->phy_number) {
- phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select) {
- ret = -ENOMEM;
- goto unregister_extcon;
+ motg->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+ if (IS_ERR(motg->sleep_clk)) {
+ ret = PTR_ERR(motg->sleep_clk);
+ motg->sleep_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto put_xo_clk;
+ else
+ dev_dbg(&pdev->dev, "failed to get sleep_clk\n");
+ } else {
+ ret = clk_prepare_enable(motg->sleep_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed to vote sleep_clk%d\n",
+ __func__, ret);
+ goto put_xo_clk;
}
- /* Enable second PHY with the OTG port */
- writel(0x1, phy_select);
}
- dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
+ /*
+ * If present, phy_reset_clk is used to reset the PHY, ULPI bridge
+ * and CSR Wrapper. This is a reset only clock.
+ */
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_reset_clk") >= 0) {
+ motg->phy_reset_clk = devm_clk_get(&pdev->dev, "phy_reset_clk");
+ if (IS_ERR(motg->phy_reset_clk)) {
+ ret = PTR_ERR(motg->phy_reset_clk);
+ goto disable_sleep_clk;
+ }
+
+ motg->phy_reset = devm_reset_control_get(&pdev->dev,
+ "phy_reset");
+ if (IS_ERR(motg->phy_reset)) {
+ dev_err(&pdev->dev, "failed to get phy_reset\n");
+ ret = PTR_ERR(motg->phy_reset);
+ goto disable_sleep_clk;
+ }
+ }
+
+ /*
+ * If present, phy_por_clk is used to assert/de-assert phy POR
+ * input. This is a reset only clock. phy POR must be asserted
+ * after overriding the parameter registers via CSR wrapper or
+ * ULPI bridge.
+ */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_por_clk") >= 0) {
+ motg->phy_por_clk = devm_clk_get(&pdev->dev, "phy_por_clk");
+ if (IS_ERR(motg->phy_por_clk)) {
+ ret = PTR_ERR(motg->phy_por_clk);
+ goto disable_sleep_clk;
+ }
+
+ motg->phy_por_reset = devm_reset_control_get(&pdev->dev,
+ "phy_por_reset");
+ if (IS_ERR(motg->phy_por_reset)) {
+ dev_err(&pdev->dev, "failed to get phy_por_reset\n");
+ ret = PTR_ERR(motg->phy_por_reset);
+ goto disable_sleep_clk;
+ }
+ }
+
+ /*
+ * If present, phy_csr_clk is required for accessing PHY
+ * CSR registers via AHB2PHY interface.
+ */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_csr_clk") >= 0) {
+ motg->phy_csr_clk = devm_clk_get(&pdev->dev, "phy_csr_clk");
+ if (IS_ERR(motg->phy_csr_clk)) {
+ ret = PTR_ERR(motg->phy_csr_clk);
+ goto disable_sleep_clk;
+ } else {
+ ret = clk_prepare_enable(motg->phy_csr_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to enable phy csr clk %d\n", ret);
+ goto disable_sleep_clk;
+ }
+ }
+ }
+
+ of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency",
+ &motg->pm_qos_latency);
+
+ pdata = msm_otg_dt_to_pdata(pdev);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto disable_phy_csr_clk;
+ }
+ pdev->dev.platform_data = pdata;
+
+ pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!pdata->bus_scale_table)
+ dev_dbg(&pdev->dev, "bus scaling is disabled\n");
+
+ if (pdata->phy_type == QUSB_ULPI_PHY) {
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "phy_ref_clk") >= 0) {
+ motg->phy_ref_clk = devm_clk_get(&pdev->dev,
+ "phy_ref_clk");
+ if (IS_ERR(motg->phy_ref_clk)) {
+ ret = PTR_ERR(motg->phy_ref_clk);
+ goto disable_phy_csr_clk;
+ } else {
+ ret = clk_prepare_enable(motg->phy_ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to enable phy ref clk %d\n",
+ ret);
+ goto disable_phy_csr_clk;
+ }
+ }
+ }
+ }
+
+ motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!motg->phy.otg) {
+ ret = -ENOMEM;
+ goto disable_phy_csr_clk;
+ }
+
+ the_msm_otg = motg;
+ motg->pdata = pdata;
+ phy = &motg->phy;
+ phy->dev = &pdev->dev;
+ motg->pdev = pdev;
+ motg->dbg_idx = 0;
+ motg->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+ if (motg->pdata->bus_scale_table) {
+ motg->bus_perf_client =
+ msm_bus_scale_register_client(motg->pdata->bus_scale_table);
+ if (!motg->bus_perf_client) {
+ dev_err(motg->phy.dev, "%s: Failed to register BUS\n"
+ "scaling client!!\n", __func__);
+ } else {
+ debug_bus_voting_enabled = true;
+ /* Some platforms require BUS vote to control clocks */
+ msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
+ }
+ }
+
+ ret = msm_otg_bus_freq_get(motg);
+ if (ret) {
+ pr_err("failed to get noc clocks: %d\n", ret);
+ } else {
+ ret = msm_otg_bus_freq_set(motg, motg->default_noc_mode);
+ if (ret)
+ pr_err("failed to vote explicit noc rates: %d\n", ret);
+ }
+
+ /* initialize reset counter */
+ motg->reset_counter = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get core iomem resource\n");
+ ret = -ENODEV;
+ goto devote_bus_bw;
+ }
+
+ motg->io_res = res;
+ motg->regs = ioremap(res->start, resource_size(res));
+ if (!motg->regs) {
+ dev_err(&pdev->dev, "core iomem ioremap failed\n");
+ ret = -ENOMEM;
+ goto devote_bus_bw;
+ }
+ dev_info(&pdev->dev, "OTG regs = %pK\n", motg->regs);
+
+ if (pdata->enable_sec_phy) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tcsr");
+ if (!res) {
+ dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
+ } else {
+ tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!tcsr) {
+ dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
+ } else {
+ /* Enable USB2 on secondary HSPHY. */
+ writel_relaxed(0x1, tcsr);
+ /*
+ * Ensure that TCSR write is completed before
+ * USB registers initialization.
+ */
+ mb();
+ }
+ }
+ }
+
+ if (pdata->enable_sec_phy)
+ motg->usb_phy_ctrl_reg = USB_PHY_CTRL2;
+ else
+ motg->usb_phy_ctrl_reg = USB_PHY_CTRL;
+
+ /*
+ * The USB PHY wrapper provides a register interface
+ * through AHB2PHY for performing PHY related operations
+ * like retention, HV interrupts and overriding parameter
+ * registers etc. The registers start at 4 byte boundary
+ * but only the first byte is valid and remaining are not
+ * used. Relaxed versions of readl/writel should be used.
+ *
+ * The link does not have any PHY specific registers.
+	 * Hence set motg->usb_phy_ctrl_reg to 0.
+ */
+ if (motg->pdata->phy_type == SNPS_FEMTO_PHY ||
+ pdata->phy_type == QUSB_ULPI_PHY) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "phy_csr");
+ if (!res) {
+ dev_err(&pdev->dev, "PHY CSR IOMEM missing!\n");
+ ret = -ENODEV;
+ goto free_regs;
+ }
+ motg->phy_csr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(motg->phy_csr_regs)) {
+ ret = PTR_ERR(motg->phy_csr_regs);
+ dev_err(&pdev->dev, "PHY CSR ioremap failed!\n");
+ goto free_regs;
+ }
+ motg->usb_phy_ctrl_reg = 0;
+ }
motg->irq = platform_get_irq(pdev, 0);
- if (motg->irq < 0) {
+ if (!motg->irq) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- ret = motg->irq;
- goto unregister_extcon;
+ ret = -ENODEV;
+ goto free_regs;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ motg->async_irq = platform_get_irq_byname(pdev, "async_irq");
+ if (motg->async_irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq for async_int failed\n");
+ motg->async_irq = 0;
+ goto free_regs;
+ }
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
- if (ret)
- goto unregister_extcon;
+ if (motg->xo_clk) {
+ ret = clk_prepare_enable(motg->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s failed to vote for TCXO %d\n",
+ __func__, ret);
+ goto free_xo_handle;
+ }
+ }
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
- clk_set_rate(motg->clk, 60000000);
-
- clk_prepare_enable(motg->clk);
clk_prepare_enable(motg->pclk);
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
+ if (IS_ERR(hsusb_vdd)) {
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
+ if (IS_ERR(hsusb_vdd)) {
+ dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
+ ret = PTR_ERR(hsusb_vdd);
+ goto devote_xo_handle;
+ }
+ }
- ret = msm_hsusb_init_vddcx(motg, 1);
+ if (of_get_property(pdev->dev.of_node,
+ "qcom,vdd-voltage-level",
+ &len)){
+ if (len == sizeof(tmp)) {
+ of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,vdd-voltage-level",
+ tmp, len/sizeof(*tmp));
+ vdd_val[0] = tmp[0];
+ vdd_val[1] = tmp[1];
+ vdd_val[2] = tmp[2];
+ } else {
+ dev_dbg(&pdev->dev,
+ "Using default hsusb vdd config.\n");
+ goto devote_xo_handle;
+ }
+ } else {
+ goto devote_xo_handle;
+ }
+
+ ret = msm_hsusb_config_vddcx(1);
if (ret) {
dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
- goto disable_clks;
+ goto devote_xo_handle;
+ }
+
+ ret = regulator_enable(hsusb_vdd);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
+ goto free_config_vddcx;
}
ret = msm_hsusb_ldo_init(motg, 1);
if (ret) {
dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
- goto disable_vddcx;
+ goto free_hsusb_vdd;
}
- ret = msm_hsusb_ldo_set_mode(motg, 1);
+
+ /* Get pinctrl if target uses pinctrl */
+ motg->phy_pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(motg->phy_pinctrl)) {
+ if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
+ dev_err(&pdev->dev, "Error encountered while getting pinctrl");
+ ret = PTR_ERR(motg->phy_pinctrl);
+ goto free_ldo_init;
+ }
+ dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
+ motg->phy_pinctrl = NULL;
+ }
+
+ ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
if (ret) {
dev_err(&pdev->dev, "hsusb vreg enable failed\n");
- goto disable_ldo;
+ goto free_ldo_init;
}
+ clk_prepare_enable(motg->core_clk);
- writel(0, USB_USBINTR);
- writel(0, USB_OTGSC);
+ /* Check if USB mem_type change is needed to workaround PNOC hw issue */
+ msm_otg_pnoc_errata_fix(motg);
+ writel_relaxed(0, USB_USBINTR);
+ writel_relaxed(0, USB_OTGSC);
+ /* Ensure that above STOREs are completed before enabling interrupts */
+ mb();
+
+ motg->id_state = USB_ID_FLOAT;
+ set_bit(ID, &motg->inputs);
INIT_WORK(&motg->sm_work, msm_otg_sm_work);
INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
- ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
+ INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
+ INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
+ setup_timer(&motg->chg_check_timer, msm_otg_chg_check_timer_func,
+ (unsigned long) motg);
+ motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
+ if (!motg->otg_wq) {
+ pr_err("%s: Unable to create workqueue otg_wq\n",
+ __func__);
+ goto disable_core_clk;
+ }
+
+ ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
"msm_otg", motg);
if (ret) {
dev_err(&pdev->dev, "request irq failed\n");
- goto disable_ldo;
+ goto destroy_wq;
}
- phy->init = msm_phy_init;
+ motg->phy_irq = platform_get_irq_byname(pdev, "phy_irq");
+ if (motg->phy_irq < 0) {
+ dev_dbg(&pdev->dev, "phy_irq is not present\n");
+ motg->phy_irq = 0;
+ } else {
+
+ /* clear all interrupts before enabling the IRQ */
+ writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR0);
+ writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
+
+ writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
+ /*
+ * Databook says 200 usec delay is required for
+ * clearing the interrupts.
+ */
+ udelay(200);
+ writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
+
+ ret = request_irq(motg->phy_irq, msm_otg_phy_irq_handler,
+ IRQF_TRIGGER_RISING, "msm_otg_phy_irq", motg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_irq request fail %d\n", ret);
+ goto free_irq;
+ }
+ }
+
+ ret = request_irq(motg->async_irq, msm_otg_irq,
+ IRQF_TRIGGER_RISING, "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n");
+ goto free_phy_irq;
+ }
+ disable_irq(motg->async_irq);
+
+ if (pdata->otg_control == OTG_PHY_CONTROL && pdata->mpm_otgsessvld_int)
+ msm_mpm_enable_pin(pdata->mpm_otgsessvld_int, 1);
+
+ if (pdata->mpm_dpshv_int)
+ msm_mpm_enable_pin(pdata->mpm_dpshv_int, 1);
+ if (pdata->mpm_dmshv_int)
+ msm_mpm_enable_pin(pdata->mpm_dmshv_int, 1);
+
+ phy->init = msm_otg_reset;
phy->set_power = msm_otg_set_power;
- phy->notify_disconnect = msm_phy_notify_disconnect;
- phy->type = USB_PHY_TYPE_USB2;
+ phy->set_suspend = msm_otg_set_suspend;
+ phy->dbg_event = msm_otg_dbg_log_event;
phy->io_ops = &msm_otg_io_ops;
phy->otg->usb_phy = &motg->phy;
phy->otg->set_host = msm_otg_set_host;
phy->otg->set_peripheral = msm_otg_set_peripheral;
+ if (pdata->dp_manual_pullup)
+ phy->flags |= ENABLE_DP_MANUAL_PULLUP;
- msm_usb_reset(phy);
+ if (pdata->enable_sec_phy)
+ phy->flags |= ENABLE_SECONDARY_PHY;
- ret = usb_add_phy_dev(&motg->phy);
+ ret = usb_add_phy(&motg->phy, USB_PHY_TYPE_USB2);
if (ret) {
dev_err(&pdev->dev, "usb_add_phy failed\n");
- goto disable_ldo;
+ goto free_async_irq;
+ }
+
+ ret = usb_phy_regulator_init(motg);
+ if (ret) {
+ dev_err(&pdev->dev, "usb_phy_regulator_init failed\n");
+ goto remove_phy;
+ }
+
+ if (motg->pdata->mode == USB_OTG &&
+ motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+ !motg->phy_irq) {
+
+ if (gpio_is_valid(motg->pdata->usb_id_gpio)) {
+ /* usb_id_gpio request */
+ ret = gpio_request(motg->pdata->usb_id_gpio,
+ "USB_ID_GPIO");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for id\n");
+ motg->pdata->usb_id_gpio = 0;
+ goto remove_phy;
+ }
+
+ /*
+ * The following code implements switch between the HOST
+ * mode to device mode when used different HW components
+ * on the same port: USB HUB and the usb jack type B
+ * for device mode In this case HUB should be gone
+ * only once out of reset at the boot time and after
+ * that always stay on
+ */
+ if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->hub_reset_gpio,
+ "qcom,hub-reset-gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for hub reset\n");
+ goto remove_phy;
+ }
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 1);
+ }
+
+ if (gpio_is_valid(motg->pdata->switch_sel_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->switch_sel_gpio,
+ "qcom,sw-sel-gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for switch sel\n");
+ goto remove_phy;
+ }
+ if (gpio_get_value(motg->pdata->usb_id_gpio))
+ gpio_direction_input(
+ motg->pdata->switch_sel_gpio);
+
+ else
+ gpio_direction_output(
+ motg->pdata->switch_sel_gpio,
+ 1);
+ }
+
+ /* usb_id_gpio to irq */
+ id_irq = gpio_to_irq(motg->pdata->usb_id_gpio);
+ motg->ext_id_irq = id_irq;
+ } else if (motg->pdata->pmic_id_irq) {
+ id_irq = motg->pdata->pmic_id_irq;
+ }
+
+ if (id_irq) {
+ ret = request_irq(id_irq,
+ msm_id_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "msm_otg", motg);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed for ID\n");
+ goto remove_phy;
+ }
+ } else {
+ /* PMIC does USB ID detection and notifies through
+ * USB_OTG property of USB powersupply.
+ */
+ dev_dbg(&pdev->dev, "PMIC does ID detection\n");
+ }
}
platform_set_drvdata(pdev, motg);
device_init_wakeup(&pdev->dev, 1);
- if (motg->pdata->mode == USB_DR_MODE_OTG &&
- motg->pdata->otg_control == OTG_USER_CONTROL) {
- ret = msm_otg_debugfs_init(motg);
- if (ret)
- dev_dbg(&pdev->dev, "Can not create mode change file\n");
- }
+ ret = msm_otg_debugfs_init(motg);
+ if (ret)
+ dev_dbg(&pdev->dev, "mode debugfs file is not available\n");
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
+ if (motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+ (!(motg->pdata->mode == USB_OTG) ||
+ motg->pdata->pmic_id_irq || motg->ext_id_irq ||
+ !motg->phy_irq))
+ motg->caps = ALLOW_PHY_POWER_COLLAPSE | ALLOW_PHY_RETENTION;
- motg->reboot.notifier_call = msm_otg_reboot_notify;
- register_reboot_notifier(&motg->reboot);
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL || motg->phy_irq ||
+ motg->pdata->enable_phy_id_pullup)
+ motg->caps = ALLOW_PHY_RETENTION | ALLOW_PHY_REGULATORS_LPM;
+ if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
+ motg->caps |= ALLOW_HOST_PHY_RETENTION;
+
+ device_create_file(&pdev->dev, &dev_attr_dpdm_pulldown_enable);
+
+ if (motg->pdata->enable_lpm_on_dev_suspend)
+ motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND;
+
+ if (motg->pdata->disable_retention_with_vdd_min)
+ motg->caps |= ALLOW_VDD_MIN_WITH_RETENTION_DISABLED;
+
+ /*
+ * PHY DVDD is supplied by a always on PMIC LDO (unlike
+ * vddcx/vddmx). PHY can keep D+ pull-up and D+/D-
+ * pull-down during suspend without any additional
+ * hardware re-work.
+ */
+ if (motg->pdata->phy_type == SNPS_FEMTO_PHY)
+ motg->caps |= ALLOW_BUS_SUSPEND_WITHOUT_REWORK;
+
+ pm_stay_awake(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ if (motg->pdata->delay_lpm_on_disconnect) {
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ lpm_disconnect_thresh);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ }
+
+ ret = msm_otg_setup_ext_chg_cdev(motg);
+ if (ret)
+ dev_dbg(&pdev->dev, "fail to setup cdev\n");
+
+ if (pdev->dev.of_node) {
+ ret = msm_otg_setup_devices(pdev, pdata->mode, true);
+ if (ret) {
+ dev_err(&pdev->dev, "devices setup failed\n");
+ goto remove_cdev;
+ }
+ }
+
+ psy = power_supply_get_by_name("usb");
+ if (!psy) {
+ dev_dbg(&pdev->dev, "Could not get usb power_supply\n");
+ ret = -EPROBE_DEFER;
+ goto otg_remove_devices;
+ }
+
+
+ ret = msm_otg_extcon_register(motg);
+ if (ret)
+ goto put_psy;
+
+ if (motg->extcon_vbus) {
+ ret = extcon_get_cable_state_(motg->extcon_vbus, EXTCON_USB);
+ if (ret)
+ set_bit(B_SESS_VLD, &motg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &motg->inputs);
+ }
+
+ if (motg->extcon_id) {
+ ret = extcon_get_cable_state_(motg->extcon_id, EXTCON_USB_HOST);
+ if (ret)
+ clear_bit(ID, &motg->inputs);
+ else
+ set_bit(ID, &motg->inputs);
+ }
+
+ if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->hub_reset_gpio,
+ "HUB_RESET");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for hub_reset\n");
+ } else {
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 0);
+ /* 5 microsecs reset signaling to usb hub */
+ usleep_range(5, 10);
+ gpio_direction_output(
+ motg->pdata->hub_reset_gpio, 1);
+ }
+ }
+
+ if (gpio_is_valid(motg->pdata->usbeth_reset_gpio)) {
+ ret = devm_gpio_request(&pdev->dev,
+ motg->pdata->usbeth_reset_gpio,
+ "ETH_RESET");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio req failed for usbeth_reset\n");
+ } else {
+ gpio_direction_output(
+ motg->pdata->usbeth_reset_gpio, 0);
+ /* 100 microsecs reset signaling to usb-to-eth */
+ usleep_range(100, 110);
+ gpio_direction_output(
+ motg->pdata->usbeth_reset_gpio, 1);
+ }
+ }
+
+ motg->pm_notify.notifier_call = msm_otg_pm_notify;
+ register_pm_notifier(&motg->pm_notify);
+ msm_otg_dbg_log_event(phy, "OTG PROBE", motg->caps, motg->lpm_flags);
+
return 0;
-disable_ldo:
+put_psy:
+ if (psy)
+ power_supply_put(psy);
+otg_remove_devices:
+ if (pdev->dev.of_node)
+ msm_otg_setup_devices(pdev, motg->pdata->mode, false);
+remove_cdev:
+ if (!motg->ext_chg_device) {
+ device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
+ cdev_del(&motg->ext_chg_cdev);
+ class_destroy(motg->ext_chg_class);
+ unregister_chrdev_region(motg->ext_chg_dev, 1);
+ }
+remove_phy:
+ usb_remove_phy(&motg->phy);
+free_async_irq:
+ free_irq(motg->async_irq, motg);
+free_phy_irq:
+ if (motg->phy_irq)
+ free_irq(motg->phy_irq, motg);
+free_irq:
+ free_irq(motg->irq, motg);
+destroy_wq:
+ destroy_workqueue(motg->otg_wq);
+disable_core_clk:
+ clk_disable_unprepare(motg->core_clk);
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+free_ldo_init:
msm_hsusb_ldo_init(motg, 0);
-disable_vddcx:
- msm_hsusb_init_vddcx(motg, 0);
-disable_clks:
+free_hsusb_vdd:
+ regulator_disable(hsusb_vdd);
+free_config_vddcx:
+ regulator_set_voltage(hsusb_vdd,
+ vdd_val[VDD_NONE],
+ vdd_val[VDD_MAX]);
+devote_xo_handle:
clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-unregister_extcon:
- extcon_unregister_notifier(motg->id.extcon,
- EXTCON_USB_HOST, &motg->id.nb);
- extcon_unregister_notifier(motg->vbus.extcon,
- EXTCON_USB, &motg->vbus.nb);
-
+ if (motg->xo_clk)
+ clk_disable_unprepare(motg->xo_clk);
+free_xo_handle:
+ if (motg->xo_clk) {
+ clk_put(motg->xo_clk);
+ motg->xo_clk = NULL;
+ }
+free_regs:
+ iounmap(motg->regs);
+devote_bus_bw:
+ if (motg->bus_perf_client) {
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+ msm_bus_scale_unregister_client(motg->bus_perf_client);
+ }
+disable_phy_csr_clk:
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+disable_sleep_clk:
+ if (motg->sleep_clk)
+ clk_disable_unprepare(motg->sleep_clk);
+put_xo_clk:
+ if (motg->xo_clk)
+ clk_put(motg->xo_clk);
+put_pclk:
+ if (motg->pclk)
+ clk_put(motg->pclk);
+put_core_clk:
+ if (motg->core_clk)
+ clk_put(motg->core_clk);
+free_motg:
+ kfree(motg);
return ret;
}
@@ -2021,28 +5235,55 @@
if (phy->otg->host || phy->otg->gadget)
return -EBUSY;
- unregister_reboot_notifier(&motg->reboot);
+ unregister_pm_notifier(&motg->pm_notify);
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ extcon_unregister_notifier(motg->extcon_id, EXTCON_USB_HOST,
+ &motg->id_nb);
+ extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
+ &motg->vbus_nb);
- extcon_unregister_notifier(motg->id.extcon, EXTCON_USB_HOST, &motg->id.nb);
- extcon_unregister_notifier(motg->vbus.extcon, EXTCON_USB, &motg->vbus.nb);
+ if (!motg->ext_chg_device) {
+ device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
+ cdev_del(&motg->ext_chg_cdev);
+ class_destroy(motg->ext_chg_class);
+ unregister_chrdev_region(motg->ext_chg_dev, 1);
+ }
+ if (pdev->dev.of_node)
+ msm_otg_setup_devices(pdev, motg->pdata->mode, false);
+ if (psy)
+ power_supply_put(psy);
msm_otg_debugfs_cleanup();
cancel_delayed_work_sync(&motg->chg_work);
+ cancel_delayed_work_sync(&motg->id_status_work);
+ cancel_delayed_work_sync(&motg->perf_vote_work);
+ msm_otg_perf_vote_update(motg, false);
cancel_work_sync(&motg->sm_work);
+ destroy_workqueue(motg->otg_wq);
pm_runtime_resume(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
+ if (motg->phy_irq)
+ free_irq(motg->phy_irq, motg);
+ if (motg->pdata->pmic_id_irq)
+ free_irq(motg->pdata->pmic_id_irq, motg);
usb_remove_phy(phy);
- disable_irq(motg->irq);
+ free_irq(motg->irq, motg);
+
+ if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
+ device_remove_file(&pdev->dev,
+ &dev_attr_dpdm_pulldown_enable);
+ if (motg->pdata->otg_control == OTG_PHY_CONTROL &&
+ motg->pdata->mpm_otgsessvld_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_otgsessvld_int, 0);
+
+ if (motg->pdata->mpm_dpshv_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_dpshv_int, 0);
+ if (motg->pdata->mpm_dmshv_int)
+ msm_mpm_enable_pin(motg->pdata->mpm_dmshv_int, 0);
/*
* Put PHY in low power mode.
@@ -2050,9 +5291,9 @@
ulpi_read(phy, 0x14);
ulpi_write(phy, 0x08, 0x09);
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+ writel_relaxed(readl_relaxed(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
+ if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
break;
udelay(1);
cnt++;
@@ -2061,34 +5302,77 @@
dev_err(phy->dev, "Unable to suspend PHY\n");
clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
- msm_hsusb_ldo_init(motg, 0);
+ clk_disable_unprepare(motg->core_clk);
+ if (motg->phy_csr_clk)
+ clk_disable_unprepare(motg->phy_csr_clk);
+ if (motg->xo_clk) {
+ clk_disable_unprepare(motg->xo_clk);
+ clk_put(motg->xo_clk);
+ }
+ if (!IS_ERR(motg->sleep_clk))
+ clk_disable_unprepare(motg->sleep_clk);
+
+ msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
+ msm_hsusb_ldo_init(motg, 0);
+ regulator_disable(hsusb_vdd);
+ regulator_set_voltage(hsusb_vdd,
+ vdd_val[VDD_NONE],
+ vdd_val[VDD_MAX]);
+
+ iounmap(motg->regs);
pm_runtime_set_suspended(&pdev->dev);
+ clk_put(motg->pclk);
+ clk_put(motg->core_clk);
+
+ if (motg->bus_perf_client) {
+ msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
+ msm_bus_scale_unregister_client(motg->bus_perf_client);
+ }
+
return 0;
}
+static void msm_otg_shutdown(struct platform_device *pdev)
+{
+ struct msm_otg *motg = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "OTG shutdown\n");
+ msm_hsusb_vbus_power(motg, 0);
+}
+
#ifdef CONFIG_PM
static int msm_otg_runtime_idle(struct device *dev)
{
struct msm_otg *motg = dev_get_drvdata(dev);
- struct usb_otg *otg = motg->phy.otg;
+ struct usb_phy *phy = &motg->phy;
dev_dbg(dev, "OTG runtime idle\n");
+ msm_otg_dbg_log_event(phy, "RUNTIME IDLE",
+ phy->otg->state, motg->ext_chg_active);
- /*
- * It is observed some times that a spurious interrupt
- * comes when PHY is put into LPM immediately after PHY reset.
- * This 1 sec delay also prevents entering into LPM immediately
- * after asynchronous interrupt.
- */
- if (otg->state != OTG_STATE_UNDEFINED)
- pm_schedule_suspend(dev, 1000);
+ if (phy->otg->state == OTG_STATE_UNDEFINED)
+ return -EAGAIN;
- return -EAGAIN;
+ if (motg->ext_chg_active == DEFAULT) {
+ dev_dbg(dev, "Deferring LPM\n");
+ /*
+ * Charger detection may happen in user space.
+ * Delay entering LPM by 3 sec. Otherwise we
+ * have to exit LPM when user space begins
+ * charger detection.
+ *
+ * This timer will be canceled when user space
+ * votes against LPM by incrementing PM usage
+ * counter. We enter low power mode when
+ * PM usage counter is decremented.
+ */
+ pm_schedule_suspend(dev, 3000);
+ return -EAGAIN;
+ }
+
+ return 0;
}
static int msm_otg_runtime_suspend(struct device *dev)
@@ -2096,6 +5380,8 @@
struct msm_otg *motg = dev_get_drvdata(dev);
dev_dbg(dev, "OTG runtime suspend\n");
+ msm_otg_dbg_log_event(&motg->phy, "RUNTIME SUSPEND",
+ get_pm_runtime_counter(dev), 0);
return msm_otg_suspend(motg);
}
@@ -2104,6 +5390,9 @@
struct msm_otg *motg = dev_get_drvdata(dev);
dev_dbg(dev, "OTG runtime resume\n");
+ msm_otg_dbg_log_event(&motg->phy, "RUNTIME RESUME",
+ get_pm_runtime_counter(dev), 0);
+
return msm_otg_resume(motg);
}
#endif
@@ -2114,44 +5403,64 @@
struct msm_otg *motg = dev_get_drvdata(dev);
dev_dbg(dev, "OTG PM suspend\n");
- return msm_otg_suspend(motg);
+ msm_otg_dbg_log_event(&motg->phy, "PM SUSPEND START",
+ get_pm_runtime_counter(dev),
+ atomic_read(&motg->pm_suspended));
+
+ /* flush any pending sm_work first */
+ flush_work(&motg->sm_work);
+ if (!atomic_read(&motg->in_lpm)) {
+ dev_err(dev, "Abort PM suspend!! (USB is outside LPM)\n");
+ return -EBUSY;
+ }
+ atomic_set(&motg->pm_suspended, 1);
+
+ return 0;
}
static int msm_otg_pm_resume(struct device *dev)
{
+ int ret = 0;
struct msm_otg *motg = dev_get_drvdata(dev);
- int ret;
dev_dbg(dev, "OTG PM resume\n");
+ msm_otg_dbg_log_event(&motg->phy, "PM RESUME START",
+ get_pm_runtime_counter(dev), pm_runtime_suspended(dev));
- ret = msm_otg_resume(motg);
- if (ret)
- return ret;
+ if (motg->resume_pending || motg->phy_irq_pending) {
+ msm_otg_dbg_log_event(&motg->phy, "PM RESUME BY USB",
+ motg->async_int, motg->resume_pending);
+ /* sm work if pending will start in pm notify to exit LPM */
+ }
- /*
- * Runtime PM Documentation recommends bringing the
- * device to full powered state upon resume.
- */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- return 0;
+ return ret;
}
#endif
+#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
msm_otg_runtime_idle)
};
+#endif
+
+static const struct of_device_id msm_otg_dt_match[] = {
+ { .compatible = "qcom,hsusb-otg",
+ },
+ {}
+};
static struct platform_driver msm_otg_driver = {
.probe = msm_otg_probe,
.remove = msm_otg_remove,
+ .shutdown = msm_otg_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
+#endif
.of_match_table = msm_otg_dt_match,
},
};
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
new file mode 100644
index 0000000..56fe12e
--- /dev/null
+++ b/include/linux/input/synaptics_dsx.h
@@ -0,0 +1,113 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsx"
+#define STYLUS_DRIVER_NAME "synaptics_dsx_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsx_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsx_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsx_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsx_i2c"
+#define SPI_DRIVER_NAME "synaptics_dsx_spi"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+ unsigned char nbuttons;
+ unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @addr_delay_us: delay time after sending address word
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+ bool x_flip;
+ bool y_flip;
+ bool swap_axes;
+ int irq_gpio;
+ int irq_on_state;
+ int power_gpio;
+ int power_on_state;
+ int reset_gpio;
+ int reset_on_state;
+ int max_y_for_2d;
+ unsigned long irq_flags;
+ unsigned short i2c_addr;
+ unsigned short ub_i2c_addr;
+ unsigned short device_descriptor_addr;
+ unsigned int panel_x;
+ unsigned int panel_y;
+ unsigned int power_delay_ms;
+ unsigned int reset_delay_ms;
+ unsigned int reset_active_ms;
+ unsigned int byte_delay_us;
+ unsigned int block_delay_us;
+ unsigned int addr_delay_us;
+ const char *pwr_reg_name;
+ const char *bus_reg_name;
+ struct synaptics_dsx_button_map *cap_button_map;
+ struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5b5d4c7..34ed577 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -63,6 +63,15 @@
return tsk->signal->oom_mm;
}
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs special
+ * handling. The result is guaranteed to stay true once it is set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+ return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 370cbcf..164abe2 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -273,6 +273,7 @@
POWER_SUPPLY_PROP_CONNECTOR_TYPE,
POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
POWER_SUPPLY_PROP_MIN_ICL,
+ POWER_SUPPLY_PROP_MOISTURE_DETECTED,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index bbc4625..14e8d79 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -181,6 +181,9 @@
#define PM660L_SUBTYPE 0x1A
#define PM660_SUBTYPE 0x1B
+/* PMI632 */
+#define PMI632_SUBTYPE 0x25
+
/* PMI8998 REV_ID */
#define PMI8998_V1P0_REV1 0x00
#define PMI8998_V1P0_REV2 0x00
@@ -219,6 +222,12 @@
#define PM660L_V2P0_REV3 0x00
#define PM660L_V2P0_REV4 0x02
+/* PMI632 REV_ID */
+#define PMI632_V1P0_REV1 0x00
+#define PMI632_V1P0_REV2 0x00
+#define PMI632_V1P0_REV3 0x00
+#define PMI632_V1P0_REV4 0x01
+
/* PMI8998 FAB_ID */
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0d4035a..62c770d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -636,6 +636,7 @@
#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
+#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index b2eb2d0..200c3ab 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -468,6 +468,9 @@
* @deactivated: True if gadget is deactivated - in deactivated state it cannot
* be connected.
* @connected: True if gadget is connected.
+ * @bam2bam_func_enabled: Indicates whether a function using bam2bam is enabled.
+ * @extra_buf_alloc: Extra allocation size for AXI prefetch so that
+ * out-of-bounds access is prevented.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -521,6 +524,9 @@
unsigned deactivated:1;
unsigned connected:1;
bool remote_wakeup;
+ bool bam2bam_func_enabled;
+ u32 extra_buf_alloc;
+ bool l1_supported;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
new file mode 100644
index 0000000..53d8458
--- /dev/null
+++ b/include/linux/usb/msm_hsusb.h
@@ -0,0 +1,357 @@
+/* include/linux/usb/msm_hsusb.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_HSUSB_H
+#define __ASM_ARCH_MSM_HSUSB_H
+
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/clk.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/power_supply.h>
+#include <linux/cdev.h>
+#include <linux/usb_bam.h>
+#include <linux/extcon.h>
+#include <linux/regulator/driver.h>
+/**
+ * Requested USB votes for NOC frequency
+ *
+ * USB_NOC_NOM_VOTE Vote for NOM set of NOC frequencies
+ * USB_NOC_SVS_VOTE Vote for SVS set of NOC frequencies
+ *
+ */
+enum usb_noc_mode {
+ USB_NOC_NOM_VOTE = 0,
+ USB_NOC_SVS_VOTE,
+ USB_NOC_NUM_VOTE,
+};
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_IN_PROGRESS Charger detection in progress
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_IN_PROGRESS,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ * USB_NONCOMPLIANT_CHARGER A non-compliant charger pulls DP and DM to specific
+ * voltages between 2.0-3.3v for identification.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+ USB_NONCOMPLIANT_CHARGER,
+ USB_FLOATED_CHARGER,
+};
+
+/**
+ * Maintain state for hvdcp external charger status
+ * DEFAULT This is used when DCP is detected
+ * ACTIVE This is used when ioctl is called to block LPM
+ * INACTIVE This is used when ioctl is called to unblock LPM
+ */
+
+enum usb_ext_chg_status {
+ DEFAULT = 1,
+ ACTIVE,
+ INACTIVE,
+};
+
+/**
+ * USB ID state
+ */
+enum usb_id_state {
+ USB_ID_GROUND = 0,
+ USB_ID_FLOAT,
+};
+
+#define USB_NUM_BUS_CLOCKS 3
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @async_irq: IRQ number used by some controllers during low power state
+ * @phy_irq: IRQ number assigned for PHY to notify events like id and line
+ state changes.
+ * @pclk: clock struct of iface_clk.
+ * @core_clk: clock struct of core_bus_clk.
+ * @sleep_clk: clock struct of sleep_clk for USB PHY.
+ * @phy_reset_clk: clock struct of phy_reset_clk for USB PHY. This clock is
+ a reset only clock and resets the PHY, ULPI bridge and
+ CSR wrapper.
+ * @phy_por_clk: clock struct of phy_por_clk for USB PHY. This clock is
+ a reset only clock and resets only the PHY (POR).
+ * @phy_csr_clk: clock struct of phy_csr_clk for USB PHY. This clock is
+ required to access PHY CSR registers via AHB2PHY interface.
+ * @bus_clks: bimc/snoc/pcnoc clock struct.
+ * @core_reset: Reset control for core_clk
+ * @phy_reset: Reset control for phy_reset_clk
+ * @phy_por_reset: Reset control for phy_por_clk
+ * @default_noc_mode: default frequency for NOC clocks - SVS or NOM
+ * @core_clk_rate: core clk max frequency
+ * @regs: ioremapped register base address.
+ * @usb_phy_ctrl_reg: relevant PHY_CTRL_REG register base address.
+ * @inputs: OTG state machine inputs(Id, SessValid etc).
+ * @sm_work: OTG state machine work.
+ * @sm_work_pending: OTG state machine work is pending, queued post pm_resume
+ * @resume_pending: USB h/w lpm_exit pending. Done on next sm_work run
+ * @pm_suspended: OTG device is system(PM) suspended.
+ * @pm_notify: Notifier to receive system wide PM transition events.
+ It is used to defer wakeup events processing until
+ system is RESUMED.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
+ * @cur_power: The amount of mA available from downstream port.
+ * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @bus_perf_client: Bus performance client handle to request BUS bandwidth
+ * @host_bus_suspend: indicates host bus suspend or not.
+ * @device_bus_suspend: indicates device bus suspend or not.
+ * @bus_clks_enabled: indicates pcnoc/snoc/bimc clocks are on or not.
+ * @chg_check_timer: The timer used to implement the workaround to detect
+ * very slow plug in of wall charger.
+ * @bc1p2_current_max: Max charging current allowed as per bc1.2 chg detection
+ * @typec_current_max: Max charging current allowed as per type-c chg detection
+ * @is_ext_chg_dcp: To indicate whether charger detected by external entity
+ SMB hardware is DCP charger or not.
+ * @ext_id_irq: IRQ for ID interrupt.
+ * @phy_irq_pending: Gets set when PHY IRQ arrives in LPM.
+ * @id_state: Indicates USBID line status.
+ * @rm_pulldown: Indicates pulldown status on D+ and D- data lines.
+ * @extcon_vbus: Used for VBUS notification registration.
+ * @extcon_id: Used for ID notification registration.
+ * @vbus_nb: Notification callback for VBUS event.
+ * @id_nb: Notification callback for ID event.
+ * @dpdm_desc: Regulator descriptor for D+ and D- voting.
+ * @dpdm_rdev: Regulator class device for dpdm regulator.
+ * @dbg_idx: Dynamic debug buffer Index.
+ * @dbg_lock: Dynamic debug buffer Lock.
+ * @buf: Dynamic Debug Buffer.
+ * @max_nominal_system_clk_rate: max freq at which system clock can run in
+ nominal mode.
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ struct platform_device *pdev;
+ int irq;
+ int async_irq;
+ int phy_irq;
+ struct clk *xo_clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ struct clk *sleep_clk;
+ struct clk *phy_reset_clk;
+ struct clk *phy_por_clk;
+ struct clk *phy_csr_clk;
+ struct clk *bus_clks[USB_NUM_BUS_CLOCKS];
+ struct clk *phy_ref_clk;
+ struct reset_control *core_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *phy_por_reset;
+ long core_clk_rate;
+ long core_clk_svs_rate;
+ long core_clk_nominal_rate;
+ enum usb_noc_mode default_noc_mode;
+ struct resource *io_res;
+ void __iomem *regs;
+ void __iomem *phy_csr_regs;
+ void __iomem *usb_phy_ctrl_reg;
+#define ID 0
+#define B_SESS_VLD 1
+#define A_BUS_SUSPEND 14
+#define B_FALSE_SDP 18
+ unsigned long inputs;
+ struct work_struct sm_work;
+ bool sm_work_pending;
+ bool resume_pending;
+ atomic_t pm_suspended;
+ struct notifier_block pm_notify;
+ atomic_t in_lpm;
+ bool err_event_seen;
+ int async_int;
+ unsigned int cur_power;
+ struct workqueue_struct *otg_wq;
+ struct delayed_work chg_work;
+ struct delayed_work id_status_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ unsigned int dcd_time;
+ unsigned long caps;
+ uint32_t bus_perf_client;
+ bool host_bus_suspend;
+ bool device_bus_suspend;
+ bool bus_clks_enabled;
+ struct timer_list chg_check_timer;
+ /*
+ * Allowing PHY power collapse turns off the HSUSB 3.3v and 1.8v
+ * analog regulators while going to low power mode.
+ * Currently only the 28nm PHY supports allowing PHY
+ * power collapse since it doesn't have leakage currents while
+ * turning off the power rails.
+ */
+#define ALLOW_PHY_POWER_COLLAPSE BIT(0)
+ /*
+ * Allow PHY RETENTION mode before turning off the digital
+ * voltage regulator(VDDCX).
+ */
+#define ALLOW_PHY_RETENTION BIT(1)
+ /*
+ * Allow putting the core in Low Power mode, when
+ * USB bus is suspended but cable is connected.
+ */
+#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2)
+ /*
+ * Allowing PHY regulators LPM puts the HSUSB 3.3v and 1.8v
+ * analog regulators into LPM while going to USB low power mode.
+ */
+#define ALLOW_PHY_REGULATORS_LPM BIT(3)
+ /*
+ * Allow PHY RETENTION mode before turning off the digital
+ * voltage regulator(VDDCX) during host mode.
+ */
+#define ALLOW_HOST_PHY_RETENTION BIT(4)
+ /*
+ * Allow VDD minimization without putting PHY into retention
+ * for fixing PHY current leakage issue when LDOs ar turned off.
+ */
+#define ALLOW_VDD_MIN_WITH_RETENTION_DISABLED BIT(5)
+
+ /*
+ * PHY can keep D+ pull-up during peripheral bus suspend and
+ * D+/D- pull-down during host bus suspend without any
+ * re-work. This is possible only when PHY DVDD is supplied
+ * by a PMIC LDO (unlike VDDCX/VDDMX).
+ */
+#define ALLOW_BUS_SUSPEND_WITHOUT_REWORK BIT(6)
+ unsigned long lpm_flags;
+#define PHY_PWR_COLLAPSED BIT(0)
+#define PHY_RETENTIONED BIT(1)
+#define XO_SHUTDOWN BIT(2)
+#define CLOCKS_DOWN BIT(3)
+#define PHY_REGULATORS_LPM BIT(4)
+ int reset_counter;
+ unsigned int online;
+ unsigned int host_mode;
+ unsigned int bc1p2_current_max;
+ unsigned int typec_current_max;
+
+ dev_t ext_chg_dev;
+ struct cdev ext_chg_cdev;
+ struct class *ext_chg_class;
+ struct device *ext_chg_device;
+ bool ext_chg_opened;
+ enum usb_ext_chg_status ext_chg_active;
+ struct completion ext_chg_wait;
+ struct pinctrl *phy_pinctrl;
+ bool is_ext_chg_dcp;
+ struct qpnp_vadc_chip *vadc_dev;
+ int ext_id_irq;
+ bool phy_irq_pending;
+ enum usb_id_state id_state;
+ bool rm_pulldown;
+ struct extcon_dev *extcon_vbus;
+ struct extcon_dev *extcon_id;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+ struct regulator_desc dpdm_rdesc;
+ struct regulator_dev *dpdm_rdev;
+/* Maximum debug message length */
+#define DEBUG_MSG_LEN 128UL
+/* Maximum number of messages */
+#define DEBUG_MAX_MSG 256UL
+ unsigned int dbg_idx;
+ rwlock_t dbg_lock;
+
+ char (buf[DEBUG_MAX_MSG])[DEBUG_MSG_LEN]; /* buffer */
+ unsigned int vbus_state;
+ unsigned int usb_irq_count;
+ int pm_qos_latency;
+ struct pm_qos_request pm_qos_req_dma;
+ struct delayed_work perf_vote_work;
+};
+
+struct ci13xxx_platform_data {
+ u8 usb_core_id;
+ /*
+ * value of 2^(log2_itc-1) will be used as the interrupt threshold
+ * (ITC), when log2_itc is between 1 to 7.
+ */
+ int log2_itc;
+ bool l1_supported;
+ bool enable_ahb2ahb_bypass;
+ bool enable_streaming;
+ bool enable_axi_prefetch;
+};
+
+#ifdef CONFIG_USB_BAM
+void msm_bam_set_usb_host_dev(struct device *dev);
+bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
+int msm_do_bam_disable_enable(enum usb_ctrl ctrl);
+#else
+static inline void msm_bam_set_usb_host_dev(struct device *dev) {}
+static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)
+{
+ return true;
+}
+static inline int msm_do_bam_disable_enable(enum usb_ctrl ctrl) { return true; }
+#endif
+#ifdef CONFIG_USB_CI13XXX_MSM
+void msm_hw_soft_reset(void);
+#else
+static inline void msm_hw_soft_reset(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index 974c379..b86f127 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -21,10 +21,14 @@
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
+#define USB_GENCONFIG (MSM_USB_BASE + 0x009C)
#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
+#define USB_HS_APF_CTRL (MSM_USB_BASE + 0x0380)
+
+#define APF_CTRL_EN BIT(0)
#define USB_USBCMD (MSM_USB_BASE + 0x0140)
#define USB_PORTSC (MSM_USB_BASE + 0x0184)
@@ -34,15 +38,32 @@
#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
+#define GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN BIT(12)
+#define GENCONFIG_2_DPSE_DMSE_HV_INTR_EN BIT(15)
#define USBCMD_SESS_VLD_CTRL BIT(25)
#define USBCMD_RESET 2
#define USB_USBINTR (MSM_USB_BASE + 0x0148)
+#define USB_L1_EP_CTRL (MSM_USB_BASE + 0x0250)
+#define USB_L1_CONFIG (MSM_USB_BASE + 0x0254)
+
+#define L1_CONFIG_LPM_EN BIT(4)
+#define L1_CONFIG_REMOTE_WAKEUP BIT(5)
+#define L1_CONFIG_GATE_SYS_CLK BIT(7)
+#define L1_CONFIG_PHY_LPM BIT(10)
+#define L1_CONFIG_PLL BIT(11)
+#define AHB2AHB_BYPASS BIT(31)
+#define AHB2AHB_BYPASS_BIT_MASK BIT(31)
+#define AHB2AHB_BYPASS_CLEAR (0 << 31)
+
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define PORTSC_PTS_MASK (3 << 30)
#define PORTSC_PTS_ULPI (2 << 30)
#define PORTSC_PTS_SERIAL (3 << 30)
+#define PORTSC_LS (3 << 10)
+#define PORTSC_LS_DM (1 << 10)
+#define PORTSC_CCS (1 << 0)
#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
#define ULPI_RUN (1 << 30)
@@ -52,6 +73,10 @@
#define ULPI_DATA(n) ((n) & 255)
#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
+#define GENCONFIG_BAM_DISABLE (1 << 13)
+#define GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE (1 << 4)
+#define GENCONFIG_ULPI_SERIAL_EN (1 << 5)
+
/* synopsys 28nm phy registers */
#define ULPI_PWR_CLK_MNG_REG 0x88
#define OTG_COMP_DISABLE BIT(0)
@@ -63,10 +88,16 @@
#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
+#define PHY_IDHV_INTEN (1 << 8) /* PHY ID HV interrupt */
+#define PHY_OTGSESSVLDHV_INTEN (1 << 9) /* PHY Session Valid HV int. */
+#define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */
+#define PHY_POR_BIT_MASK BIT(0)
#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
+#define PHY_POR_DEASSERT (0 << 0) /* USB2 28nm PHY POR DEASSERT */
/* OTG definitions */
#define OTGSC_INTSTS_MASK (0x7f << 16)
+#define OTGSC_IDPU (1 << 5)
#define OTGSC_ID (1 << 8)
#define OTGSC_BSV (1 << 11)
#define OTGSC_IDIS (1 << 16)
@@ -74,4 +105,29 @@
#define OTGSC_IDIE (1 << 24)
#define OTGSC_BSVIE (1 << 27)
+/* USB PHY CSR registers and bit definitions */
+
+#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078)
+#define SIDDQ BIT(2)
+
+#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C)
+#define ID_HV_CLAMP_EN_N BIT(1)
+
+#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094)
+#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2)
+
+#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0)
+#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C)
+
+#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC)
+#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0)
+
+#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8)
+
+#define USB_PHY_IDDIG_1_0 BIT(7)
+
+#define USB_PHY_IDDIG_RISE_MASK BIT(0)
+#define USB_PHY_IDDIG_FALL_MASK BIT(1)
+#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK)
+
#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 64aa52e..d999b3c 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -58,6 +58,7 @@
OTG_STATE_B_SRP_INIT,
OTG_STATE_B_PERIPHERAL,
OTG_STATE_B_SUSPEND,
+ OTG_STATE_B_CHARGER,
/* extra dual-role default-b states */
OTG_STATE_B_WAIT_ACON,
@@ -141,6 +142,10 @@
/* reset the PHY clocks */
int (*reset)(struct usb_phy *x);
+
+ /* for notification of usb_phy_dbg_events */
+ void (*dbg_event)(struct usb_phy *x,
+ char *event, int msg1, int msg2);
int (*disable_chirp)(struct usb_phy *x, bool disable);
};
diff --git a/include/soc/qcom/msm_tz_smmu.h b/include/soc/qcom/msm_tz_smmu.h
new file mode 100644
index 0000000..a83c9bd
--- /dev/null
+++ b/include/soc/qcom/msm_tz_smmu.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_TZ_SMMU_H__
+#define __MSM_TZ_SMMU_H__
+
+#include <linux/device.h>
+
+enum tz_smmu_device_id {
+ TZ_DEVICE_START = 0,
+ TZ_DEVICE_VIDEO = 0,
+ TZ_DEVICE_MDSS,
+ TZ_DEVICE_LPASS,
+ TZ_DEVICE_MDSS_BOOT,
+ TZ_DEVICE_USB1_HS,
+ TZ_DEVICE_OCMEM,
+ TZ_DEVICE_LPASS_CORE,
+ TZ_DEVICE_VPU,
+ TZ_DEVICE_COPSS_SMMU,
+ TZ_DEVICE_USB3_0,
+ TZ_DEVICE_USB3_1,
+ TZ_DEVICE_PCIE_0,
+ TZ_DEVICE_PCIE_1,
+ TZ_DEVICE_BCSS,
+ TZ_DEVICE_VCAP,
+ TZ_DEVICE_PCIE20,
+ TZ_DEVICE_IPA,
+ TZ_DEVICE_APPS,
+ TZ_DEVICE_GPU,
+ TZ_DEVICE_UFS,
+ TZ_DEVICE_ICE,
+ TZ_DEVICE_ROT,
+ TZ_DEVICE_VFE,
+ TZ_DEVICE_ANOC0,
+ TZ_DEVICE_ANOC1,
+ TZ_DEVICE_ANOC2,
+ TZ_DEVICE_CPP,
+ TZ_DEVICE_JPEG,
+ TZ_DEVICE_MAX,
+};
+
+#ifdef CONFIG_MSM_TZ_SMMU
+
+int msm_tz_smmu_atos_start(struct device *dev, int cb_num);
+int msm_tz_smmu_atos_end(struct device *dev, int cb_num);
+enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev);
+int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx);
+int msm_iommu_sec_pgtbl_init(void);
+int register_iommu_sec_ptbl(void);
+#else
+
+static inline int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
+{
+ return 0;
+}
+
+static inline int msm_tz_smmu_atos_end(struct device *dev, int cb_num)
+{
+ return 0;
+}
+
+static inline enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev)
+{
+ return -EINVAL;
+}
+
+static inline int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id,
+ int cbndx)
+{
+ return -EINVAL;
+}
+
+static inline int msm_iommu_sec_pgtbl_init(void)
+{
+ return -EINVAL;
+}
+
+static inline int register_iommu_sec_ptbl(void)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MSM_TZ_SMMU */
+
+#endif /* __MSM_TZ_SMMU_H__ */
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 12fa374..d9a526d 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -50,6 +50,8 @@
#define PERM_EXEC 0x1
#ifdef CONFIG_QCOM_SECURE_BUFFER
+int msm_secure_table(struct sg_table *table);
+int msm_unsecure_table(struct sg_table *table);
int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
@@ -57,8 +59,19 @@
extern int hyp_assign_phys(phys_addr_t addr, u64 size,
u32 *source_vmlist, int source_nelems,
int *dest_vmids, int *dest_perms, int dest_nelems);
+bool msm_secure_v2_is_supported(void);
const char *msm_secure_vmid_to_string(int secure_vmid);
#else
+static inline int msm_secure_table(struct sg_table *table)
+{
+ return -EINVAL;
+}
+
+static inline int msm_unsecure_table(struct sg_table *table)
+{
+ return -EINVAL;
+}
+
static inline int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
@@ -74,6 +87,11 @@
return -EINVAL;
}
+static inline bool msm_secure_v2_is_supported(void)
+{
+ return false;
+}
+
static inline const char *msm_secure_vmid_to_string(int secure_vmid)
{
return "N/A";
diff --git a/include/trace/events/rpmh.h b/include/trace/events/rpmh.h
index 919877d..fdd8720 100644
--- a/include/trace/events/rpmh.h
+++ b/include/trace/events/rpmh.h
@@ -54,12 +54,14 @@
TRACE_EVENT(rpmh_send_msg,
- TP_PROTO(const char *s, int m, int n, u32 h, u32 a, u32 v, bool c, bool t),
+ TP_PROTO(const char *s, unsigned long b, int m, int n, u32 h, u32 a,
+ u32 v, bool c, bool t),
- TP_ARGS(s, m, n, h, a, v, c, t),
+ TP_ARGS(s, b, m, n, h, a, v, c, t),
TP_STRUCT__entry(
__field(const char*, name)
+ __field(unsigned long, base)
__field(int, m)
__field(int, n)
__field(u32, hdr)
@@ -71,6 +73,7 @@
TP_fast_assign(
__entry->name = s;
+ __entry->base = b;
__entry->m = m;
__entry->n = n;
__entry->hdr = h;
@@ -80,9 +83,10 @@
__entry->trigger = t;
),
- TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d trigger: %d",
- __entry->name, __entry->m, __entry->n, __entry->hdr,
- __entry->addr, __entry->data, __entry->complete, __entry->trigger)
+ TP_printk("%s: reg: 0x%08lx send-msg: tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d trigger: %d",
+ __entry->name, __entry->base, __entry->m,
+ __entry->n, __entry->hdr, __entry->addr,
+ __entry->data, __entry->complete, __entry->trigger)
);
TRACE_EVENT(rpmh_control_msg,
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index 94e9b00..55c71dd 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -7,6 +7,11 @@
#define MAX_ION_FD 4
#define MAX_APP_NAME_SIZE 64
#define QSEECOM_HASH_SIZE 32
+
+/* qseecom_ta_heap allocation retry delay (ms) and max attempt count */
+#define QSEECOM_TA_ION_ALLOCATE_DELAY 50
+#define QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP 20
+
/*
* struct qseecom_register_listener_req -
* for register listener ioctl request
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index cabf0a8..e006463 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -15,7 +15,8 @@
#define CAM_CONFIG_DEV (CAM_COMMON_OPCODE_BASE + 0x5)
#define CAM_RELEASE_DEV (CAM_COMMON_OPCODE_BASE + 0x6)
#define CAM_SD_SHUTDOWN (CAM_COMMON_OPCODE_BASE + 0x7)
-#define CAM_COMMON_OPCODE_MAX (CAM_COMMON_OPCODE_BASE + 0x8)
+#define CAM_FLUSH_REQ (CAM_COMMON_OPCODE_BASE + 0x8)
+#define CAM_COMMON_OPCODE_MAX (CAM_COMMON_OPCODE_BASE + 0x9)
#define CAM_EXT_OPCODE_BASE 0x200
#define CAM_CONFIG_DEV_EXTERNAL (CAM_EXT_OPCODE_BASE + 0x1)
@@ -43,6 +44,20 @@
#define CAM_CMD_BUF_LEGACY 0xA
/**
+ * enum flush_type_t - Identifies the various flush types
+ *
+ * @CAM_FLUSH_TYPE_REQ: Flush specific request
+ * @CAM_FLUSH_TYPE_ALL: Flush all requests belonging to a context
+ * @CAM_FLUSH_TYPE_MAX: Max enum to validate flush type
+ *
+ */
+enum flush_type_t {
+ CAM_FLUSH_TYPE_REQ,
+ CAM_FLUSH_TYPE_ALL,
+ CAM_FLUSH_TYPE_MAX
+};
+
+/**
* struct cam_control - Structure used by ioctl control for camera
*
* @op_code: This is the op code for camera control
@@ -437,4 +452,26 @@
uint64_t resource_hdl;
};
+/**
+ * struct cam_flush_dev_cmd - Control payload for flush devices
+ *
+ * @version: Version
+ * @session_handle: Session handle for the acquire command
+ * @dev_handle: Device handle to be returned
+ * @flush_type: Flush type:
+ * 0 = flush specific request
+ * 1 = flush all
+ * @reserved: Reserved for 64 bit alignment
+ * @req_id: Request id that needs to be cancelled
+ *
+ */
+struct cam_flush_dev_cmd {
+ uint64_t version;
+ int32_t session_handle;
+ int32_t dev_handle;
+ uint32_t flush_type;
+ uint32_t reserved;
+ int64_t req_id;
+};
+
#endif /* __UAPI_CAM_DEFS_H__ */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bbe783e..f372872 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5100,6 +5100,14 @@
raw_spin_lock_irqsave(&p->pi_lock, flags);
cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+ /* Userspace tasks are forbidden to run on
+ * isolated CPUs, so exclude isolated CPUs
+ * from the mask reported by getaffinity.
+ */
+ if (!(p->flags & PF_KTHREAD))
+ cpumask_andnot(mask, mask, cpu_isolated_mask);
+
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
diff --git a/mm/mmap.c b/mm/mmap.c
index 7e6c049..c30a61e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2984,20 +2984,20 @@
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
- set_bit(MMF_OOM_SKIP, &mm->flags);
- if (unlikely(tsk_is_oom_victim(current))) {
+ if (unlikely(mm_is_oom_victim(mm))) {
/*
* Wait for oom_reap_task() to stop working on this
* mm. Because MMF_OOM_SKIP is already set before
* calling down_read(), oom_reap_task() will not run
* on this "mm" post up_write().
*
- * tsk_is_oom_victim() cannot be set from under us
- * either because current->mm is already set to NULL
+ * mm_is_oom_victim() cannot be set from under us
+ * either because victim->mm is already set to NULL
* under task_lock before calling mmput and oom_mm is
- * set not NULL by the OOM killer only if current->mm
+ * set not NULL by the OOM killer only if victim->mm
* is found not NULL while holding the task_lock.
*/
+ set_bit(MMF_OOM_SKIP, &mm->flags);
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af9a8a6..6fd9773 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -677,8 +677,10 @@
return;
/* oom_mm is bound to the signal struct life time. */
- if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
atomic_inc(&tsk->signal->oom_mm->mm_count);
+ set_bit(MMF_OOM_VICTIM, &mm->flags);
+ }
/*
* Make sure that the task is woken up from uninterruptible sleep
diff --git a/net/core/dev.c b/net/core/dev.c
index 5685744..49f17ff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4171,6 +4171,9 @@
return 0;
}
+int (*gsb_nw_stack_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(gsb_nw_stack_recv);
+
int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
EXPORT_SYMBOL(athrs_fast_nat_recv);
@@ -4185,6 +4188,7 @@
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
+ int (*gsb_ns_recv)(struct sk_buff *skb);
int (*fast_recv)(struct sk_buff *skb);
int (*embms_recv)(struct sk_buff *skb);
@@ -4246,6 +4250,13 @@
goto out;
}
#endif
+ gsb_ns_recv = rcu_dereference(gsb_nw_stack_recv);
+ if (gsb_ns_recv) {
+ if (gsb_ns_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
fast_recv = rcu_dereference(athrs_fast_nat_recv);
if (fast_recv) {
if (fast_recv(skb)) {
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index 669a890..cc377bb 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -192,10 +192,6 @@
memset(&config->agg_time, 0, sizeof(struct timespec));
}
config->agg_state = RMNET_MAP_AGG_IDLE;
- } else {
- /* How did we get here? */
- LOGE("Ran queued command when state %s",
- "is idle. State machine likely broken");
}
spin_unlock_irqrestore(&config->agg_lock, flags);